repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
eramirem/astroML | book_figures/chapter6/fig_GMM_clone.py | 3 | 2971 | """
Cloning a Distribution with Gaussian Mixtures
---------------------------------------------
Figure 6.10
Cloning a two-dimensional distribution. The left panel shows 1000 observed
points. The center panel shows a ten-component Gaussian mixture model fit to
the data (two components dominate over the other eight). The third panel shows
5000 points drawn from the model in the second panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.mixture import GMM
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Create our data: two overlapping gaussian clumps,
# in a uniform background
np.random.seed(1)
X = np.concatenate([np.random.normal(0, 1, (200, 2)),
np.random.normal(1, 1, (200, 2)),
np.random.normal(4, 1.5, (400, 2)),
9 - 12 * np.random.random((200, 2))])
#------------------------------------------------------------
# Use a GMM to model the density and clone the points
gmm = GMM(5, 'full').fit(X)
X_new = gmm.sample(5000)
xmin = -3
xmax = 9
Xgrid = np.meshgrid(np.linspace(xmin, xmax, 50),
np.linspace(xmin, xmax, 50))
Xgrid = np.array(Xgrid).reshape(2, -1).T
dens = np.exp(gmm.score(Xgrid)).reshape((50, 50))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.05,
bottom=0.12, top=0.9)
# first plot the input
ax = fig.add_subplot(131, aspect='equal')
ax.plot(X[:, 0], X[:, 1], '.k', ms=2)
ax.set_title("Input Distribution")
ax.set_ylabel('$y$')
# next plot the gmm fit
ax = fig.add_subplot(132, aspect='equal')
ax.imshow(dens.T, origin='lower', extent=[xmin, xmax, xmin, xmax],
cmap=plt.cm.binary)
ax.set_title("Density Model")
ax.yaxis.set_major_formatter(plt.NullFormatter())
# next plot the cloned distribution
ax = fig.add_subplot(133, aspect='equal')
ax.plot(X_new[:, 0], X_new[:, 1], '.k', alpha=0.3, ms=2)
ax.set_title("Cloned Distribution")
ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in fig.axes:
ax.set_xlim(xmin, xmax)
ax.set_ylim(xmin, xmax)
ax.set_xlabel('$x$')
plt.show()
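#------------------------------------------------------------
# Note: sklearn.mixture.GMM was deprecated and later removed from scikit-learn.
# With scikit-learn >= 0.18 the same clone could be written with GaussianMixture
# (there, sample() returns an (X, y) tuple and score_samples() replaces score()
# for per-point log-density). A rough, untested equivalent of the lines above:
#
#   from sklearn.mixture import GaussianMixture
#   gmm = GaussianMixture(n_components=5, covariance_type='full').fit(X)
#   X_new, _ = gmm.sample(5000)
#   dens = np.exp(gmm.score_samples(Xgrid)).reshape((50, 50))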
| bsd-2-clause |
seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
FireCARES/fire-risk | scripts/fire_incident.py | 2 | 1868 | #Weinschenk
#12-14
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import random
incident = pd.read_csv('data/arlington_incidents.csv', header=0)
total_incidents = len(incident['incident_class_code'])
total_fires = 0
for i in incident['incident_class_code']:
if i == 1:
total_fires = total_fires + 1
years_of_data = 6
#random pull from the historical data for ignition
fire_call_year = int(total_incidents/years_of_data)
ignition = zeros(fire_call_year, dtype=bool)
for num in range(0,fire_call_year):
rand = random.randrange(1,len(incident['incident_class_code']),1)
if incident['incident_class_code'][rand] == 1:
ignition[num] = True
print sum(ignition), 'projected fires' #prints number of fires per year
#determine location of fire and structure type
#fire growth model
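#the loop below follows a t-squared growth curve, fire_size = alpha*t**2;
#the sampled alpha range (0.0029-0.047) roughly spans the standard "slow" to
#"fast" t-squared growth coefficients (kW/s^2), so fire_size can be read as an
#approximate heat release rate (kW) at the time suppression takes effect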
fire_size = zeros(sum(ignition))
room_of_origin = 0
floor_of_origin = 0
structure_loss = 0
for num in range(0,sum(ignition)):
alpha = np.random.uniform(0.0029,0.047)
time_to_alarm = np.random.uniform(30,60)
time_to_dispatch = np.random.uniform(40,80)
time_to_turnout = np.random.uniform(60,100)
time_to_arrival = np.random.uniform(300,420)
time_to_suppress = np.random.uniform(60,180)
running_time = time_to_alarm+time_to_dispatch+time_to_turnout+time_to_arrival+time_to_suppress
fire_size[num] = alpha*(running_time)**2
#assessing damage to a typical residential structure
if fire_size[num] < 2000:
room_of_origin = room_of_origin + 1
elif fire_size[num] > 2000 and fire_size[num] < 10000:
floor_of_origin =floor_of_origin + 1
else:
structure_loss =structure_loss + 1
print room_of_origin, 'fire(s) room of origin |', floor_of_origin, ' fire(s) floor of origin |', structure_loss, 'fire(s) with total structure loss'
#firefighter response model
| mit |
mlyundin/scikit-learn | sklearn/tests/test_cross_validation.py | 31 | 46699 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not an integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups' segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that the StratifiedShuffleSplit indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the zero/one
# score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
rsignell-usgs/notebook | system-test/Scenario_2A_Water_Level_Bird_Redux.py | 1 | 29692 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ># IOOS System Test: [Extreme Events Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-2-extreme-events) Inundation
# <markdowncell>
# ### Can we estimate the return period of a water level by comparing modeled and/or observed water levels with NOAA exceedance probability plots?
# <markdowncell>
# Methodology:
#
# * Define temporal and spatial bounds of interest, as well as parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog, then inform the user of the DAP (model) and SOS (observation) service endpoints available
# * Obtain the stations within the spatial boundaries and process them to obtain observation data for the temporal constraints, identifying the yearly max
# * Plot observation stations on a map and indicate to the user if the minimum number of years has been met for extreme value analysis (red marker if condition is false)
# * Using DAP (model) endpoints, find all available model data sets that fall in the area of interest for the specified time range, and extract the model grid cell closest to each of the given station locations (<b>Still in Development</b>)
# * Plot the extracted model grid cell from each available model onto the map
# * Plot the annual max for each station as a timeseries plot
# * Perform extreme value analysis for a selected station, identifying the return period, and compare it to the NOAA Tides and Currents plot for the same station (a minimal GEV sketch is included just below this outline)
#
# Estimated Time To Process Notebook: --.--
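# <markdowncell>
# The extreme value step outlined above can be sketched independently of the station data
# pulled later in the notebook: given a station's yearly maxima, fit a GEV distribution with
# scipy.stats.genextreme and invert it for a return level. The cell below is illustrative
# only; the synthetic annual_max array stands in for a real station record, and none of these
# variables are used elsewhere in the notebook.
# <codecell>
import numpy as np
from scipy.stats import genextreme
#synthetic yearly maxima (placeholder for a real multi-decade station record)
annual_max = np.random.RandomState(0).gumbel(loc=1.0, scale=0.3, size=35)
c, loc, scale = genextreme.fit(annual_max)  #GEV shape, location, scale
return_period = 100.0  #years
p_exceed = 1.0 / return_period  #annual exceedance probability
level_100yr = genextreme.ppf(1.0 - p_exceed, c, loc=loc, scale=scale)
print '%.0f-year return level: %.2f (synthetic units)' % (return_period, level_100yr)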
# <headingcell level=4>
# import required libraries
# <codecell>
import matplotlib.pyplot as plt
from pylab import *
import sys
import csv
import json
from scipy.stats import genextreme
import scipy.stats as ss
import numpy as np
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import random
import netCDF4
import pandas as pd
import datetime as dt
from pyoos.collectors.coops.coops_sos import CoopsSos
import cStringIO
import iris
import urllib2
import parser
from lxml import etree #TODO suggest using bs4 instead for ease of access to XML objects
#generated for csw interface
#from fes_date_filter_formatter import fes_date_filter #date formatter (R.Signell)
import requests #required for the processing of requests
from utilities import *
from IPython.display import HTML, Image
from shapely.geometry import Polygon,Point,LineString #used for lat lon points
import folium #required for leaflet mapping
from pydap.client import open_url #pypdap
import datetime as dt
from datetime import datetime
from datetime import timedelta
%matplotlib inline
# <markdowncell>
# some functions from [Rich Signell Notebook](http://nbviewer.ipython.org/github/rsignell-usgs/notebook/blob/fef9438303b49a923024892db1ef3115e34d8271/CSW/IOOS_inundation.ipynb)
# <headingcell level=4>
# Specify Temporal and Spatial conditions
# <codecell>
#bounding box of interest, [lower left [lon,lat], upper right [lon,lat]]
bounding_box_type = "box"
bounding_box = [[-75.94,38.67],[-66.94,41.5]]
#temporal range
start_date = dt.datetime(1980,5,1).strftime('%Y-%m-%d %H:00')
end_date = dt.datetime(2014,5,1).strftime('%Y-%m-%d %H:00')
time_date_range = [start_date,end_date] #start_date_end_date
print start_date,'to',end_date
#number of years required for analysis, obs and model data
num_years_required = 30
# <codecell>
name_list=['water_surface_height_above_reference_datum',
'sea_surface_height_above_geoid','sea_surface_elevation',
'sea_surface_height_above_reference_ellipsoid','sea_surface_height_above_sea_level',
'sea_surface_height','water level']
sos_name = 'water_surface_height_above_reference_datum'
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
csw = CatalogueServiceWeb(endpoint,timeout=60)
for oper in csw.operations:
if oper.name == 'GetRecords':
print '\nISO Queryables:\n',oper.constraints['SupportedISOQueryables']['values']
#pass
#put the names in a dict for ease of access
data_dict = {}
data_dict["water"] = {"names":['water_surface_height_above_reference_datum',
'sea_surface_height_above_geoid','sea_surface_elevation',
'sea_surface_height_above_reference_ellipsoid','sea_surface_height_above_sea_level',
'sea_surface_height','water level'], "sos_name":['water_surface_height_above_reference_datum']}
# <codecell>
def fes_date_filter(start_date='1900-01-01',stop_date='2100-01-01',constraint='overlaps'):
if constraint == 'overlaps':
start = fes.PropertyIsLessThanOrEqualTo(propertyname='apiso:TempExtent_begin', literal=stop_date)
stop = fes.PropertyIsGreaterThanOrEqualTo(propertyname='apiso:TempExtent_end', literal=start_date)
elif constraint == 'within':
start = fes.PropertyIsGreaterThanOrEqualTo(propertyname='apiso:TempExtent_begin', literal=start_date)
stop = fes.PropertyIsLessThanOrEqualTo(propertyname='apiso:TempExtent_end', literal=stop_date)
return start,stop
# <codecell>
# convert User Input into FES filters
start,stop = fes_date_filter(start_date,end_date)
box = []
box.append(bounding_box[0][0])
box.append(bounding_box[0][1])
box.append(bounding_box[1][0])
box.append(bounding_box[1][1])
bbox = fes.BBox(box)
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?') for val in name_list])
val = 'Averages'
not_filt = fes.Not([fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')])
# <codecell>
filter_list = [fes.And([ bbox, start, stop, or_filt, not_filt]) ]
# connect to CSW, explore its properties
# try request using multiple filters "and" syntax: [[filter1,filter2]]
csw.getrecords2(constraints=filter_list,maxrecords=1000,esn='full')
# <codecell>
def service_urls(records,service_string='urn:x-esri:specification:ServiceType:odp:url'):
"""
extract service_urls of a specific type (DAP, SOS) from records
"""
urls=[]
for key,rec in records.iteritems():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
# <codecell>
#print records that are available
print "number of datasets available: ",len(csw.records.keys())
# <markdowncell>
# Print all the records (should you want to)
# <codecell>
#print "\n".join(csw.records)
# <markdowncell>
# Dap URLS
# <codecell>
dap_urls = service_urls(csw.records,service_string='urn:x-esri:specification:ServiceType:odp:url')
#remove duplicates and organize
dap_urls = sorted(set(dap_urls))
print "Total DAP:",len(dap_urls)
#print the first 5...
print "\n".join(dap_urls[:])
# <markdowncell>
# SOS URLs
# <codecell>
sos_urls = service_urls(csw.records,service_string='urn:x-esri:specification:ServiceType:sos:url')
#remove duplicates and organize
sos_urls = sorted(set(sos_urls))
print "Total SOS:",len(sos_urls)
print "\n".join(sos_urls)
# <markdowncell>
# ### SOS Requirements
# #### Use Pyoos SOS collector to obtain Observation data from COOPS
# <codecell>
#use the get caps to get station start and end times
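#a rough sketch of that idea (not executed here): owslib can parse the SOS
#GetCapabilities document, and each offering advertises a time period. The
#attribute names used below (contents, begin_position, end_position) follow
#owslib's SOS 1.0 client and should be verified against the owslib version in use.
#from owslib.sos import SensorObservationService
#sos = SensorObservationService(sos_urls[0].split("?")[0], version='1.0.0')
#for offering_id, offering in sos.contents.iteritems():
#    print offering_id, offering.begin_position, offering.end_position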
# <codecell>
start_time = dt.datetime.strptime(start_date,'%Y-%m-%d %H:%M')
end_time = dt.datetime.strptime(end_date,'%Y-%m-%d %H:%M')
# <codecell>
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
collector = CoopsSos()
collector.set_datum('NAVD')
collector.server.identification.title
collector.start_time = start_time
collector.end_time = end_time
collector.variables = [data_dict["water"]["sos_name"]]
# <codecell>
print "Date: ",iso_start," to ", iso_end
box_str=','.join(str(e) for e in box)
print "Lat/Lon Box: ",box_str
#grab the sos url and use it for the service
url=(sos_urls[0].split("?")[0]+'?'
'service=SOS&request=GetObservation&version=1.0.0&'
'observedProperty=%s&offering=urn:ioos:network:NOAA.NOS.CO-OPS:WaterLevelActive&'
'featureOfInterest=BBOX:%s&responseFormat=text/tab-separated-values&eventTime=%s') % (sos_name,box_str,iso_end)
r = requests.get(url)
data = r.text
#get the headers for the cols
data = data.split("\n")
headers = data[0]
station_list_dict = dict()
#parse the headers so i can create a dict
c = 0
for h in headers.split("\t"):
field = h.split(":")[0].split(" ")[0]
station_list_dict[field] = {"id":c}
c+=1
# <codecell>
def get_coops_longName(sta):
"""
get longName for specific station from COOPS SOS using DescribeSensor request
"""
url=(sos_urls[0].split("?")[0]+'?service=SOS&'
'request=DescribeSensor&version=1.0.0&outputFormat=text/xml;subtype="sensorML/1.0.1"&'
'procedure=%s') % sta
tree = etree.parse(urllib2.urlopen(url))
root = tree.getroot()
longName=root.xpath("//sml:identifier[@name='longName']/sml:Term/sml:value/text()", namespaces={'sml':"http://www.opengis.net/sensorML/1.0.1"})
return longName
# <codecell>
#finds the max value given a json object
def findMaxVal(data):
dates_array = []
vals_array = []
for x in data:
dates_array.append(str(x["t"]))
vals_array.append(x["v"])
p = np.array(vals_array,dtype=np.float)
x = np.arange(len(p))
max_val = np.amax(p)
max_idx = np.argmax(p)
return (max_val,len(p),dates_array[max_idx])
# <markdowncell>
# #### Extract the Observation Data from the collector
# <codecell>
def coops2data(collector,station_id,sos_name):
collector.features = [station_id]
collector.variables = [sos_name]
station_data = dict()
#loop through the years and get the data needed
for year_station in range(int(collector.start_time.year),collector.end_time.year+1):
link = "http://tidesandcurrents.noaa.gov/api/datagetter?product="+sos_name+"&application=NOS.COOPS.TAC.WL&"
date1 = "begin_date="+str(year_station)+"0101"
date2 = "&end_date="+str(year_station)+"1231"
datum = "&datum=MHHW"
units = "&units=metric"
station_request = "&station="+station_id+"&time_zone=GMT&format=json"  # units already set to metric above
http_request = link+date1+date2+units+datum+station_request
#print http_request
d_r = requests.get(http_request,timeout=20)
if "Great Lake station" in d_r.text:
pass
else:
key_list = d_r.json().keys()
if "data" in key_list:
data = d_r.json()['data']
max_value,num_samples,date_string = findMaxVal(data)
station_data[str(year_station)] = {"max":max_value,"num_samples":num_samples,"date_string":date_string,"raw":data}
#print "\tyear:",year_station," MaxValue:",max_value
return station_data
# <codecell>
#create dict of stations
station_list = []
for i in range(1,len(data)):
station_info = data[i].split("\t")
station = dict()
for field in station_list_dict.keys():
col = station_list_dict[field]["id"]
if col < len(station_info):
station[field] = station_info[col]
station["type"] = "obs"
station_list.append(station)
# <codecell>
def add_invalid_marker(map,s,popup_string):
map.circle_marker(location=[s["latitude"],s["longitude"]], popup=popup_string, fill_color='#ff0000', radius=10000, line_color='#ff0000')
# <markdowncell>
# TODO: Add a check before extracting the data to see if the required number of years will be met, i.e. use the SOS GetCapabilities begin and end times (a hedged sketch follows the stub below)
# <codecell>
def does_station_have_enough_times():
return True
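# <markdowncell>
# A possible implementation sketch (an addition to the original notebook): the temporal extent of
# each station offering can be read from the SOS GetCapabilities document and compared against the
# requested range. The attribute names used below (`collector.server.contents`, `begin_position`,
# `end_position`) and the offering key format are assumptions about the pyoos/OWSLib API and may
# need adjusting.
# <codecell>
def does_station_have_enough_times_sketch(collector, station_urn, num_years_required):
    """Hedged sketch: check a station's temporal coverage via the SOS capabilities document."""
    try:
        offering = collector.server.contents[station_urn]  # assumed keyed by station URN
        begin, end = offering.begin_position, offering.end_position
        return (end - begin).days / 365.25 >= num_years_required
    except (KeyError, AttributeError):
        # fall back to the optimistic behaviour of the stub above
        return True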
# <codecell>
#Embeds the HTML source of the map directly into the IPython notebook.
def inline_map(map):
map._build_map()
return HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 500px; border: none"></iframe>'.format(srcdoc=map.HTML.replace('"', '"')))
#print bounding_box[0]
map = folium.Map(location=[bounding_box[0][1], bounding_box[0][0]], zoom_start=6)
station_yearly_max = []
for s in station_list:
if s["type"] is "obs": #if its an obs station
#get the long name
s["long_name"] =get_coops_longName(s['station_id'])
s["station_num"] = str(s['station_id']).split(':')[-1]
#this is different than sos name, hourly height is hourly water level
s["data"] = coops2data(collector,s["station_num"],"high_low")
#verifies that there is the required amount of data at the station
if "latitude" in s:
if len(s["data"].keys()) >= num_years_required:
popup_string = '<b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
map.simple_marker([s["latitude"],s["longitude"]],popup=popup_string)
else:
popup_string = '<b>Not Enough Station Data for number of years requested</b><br><br>Num requested:'+str(num_years_required)+'<br>Num Available:'+str(len(s["data"].keys()))+'<br><b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
add_invalid_marker(map,s,popup_string)
else: #if it's a model station
if "latitude" in s:
popup_string = '<b>Station:</b><br>'+str(s['station_id']) + "<br><b>Long Name:</b><br>"+str(s["long_name"])
map.simple_marker([s["latitude"],s["longitude"]],popup=popup_string)
# Add the bounding box outline to the map
map.line(get_coordinates(bounding_box,bounding_box_type), line_color='#FF0000', line_weight=5)
#show map of results
inline_map(map)
# <markdowncell>
# ### Creates a time series plot only showing those stations that have enough data
# <codecell>
import prettyplotlib as ppl
# Set the random seed for consistency
np.random.seed(12)
fig, ax = plt.subplots(1)
# Show the whole color range
for s in station_list:
if "data" in s:
years = s["data"].keys()
#only show the stations with enough data
if len(s["data"].keys()) >= num_years_required:
xx = []
yx = []
for y in years:
xx.append(int(y))
val = s["data"][y]["max"]
yx.append(val)
ax.scatter(xx,yx,marker='o')
ppl.scatter(ax, xx, yx, alpha=0.8, edgecolor='black', linewidth=0.15, label=str(s["station_num"]))
#ax.scatter(xx, yx, label=str(s["station_num"]))
ppl.legend(ax, loc='right', ncol=1)
#legend = ax.legend(loc='best')
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
#frame = legend.get_frame()
title = s["long_name"][0] + ' water level'
ax.set_xlabel('Year')
ax.set_ylabel('water level (m)')
ax.set_title("Stations exceeding "+str(num_years_required)+ " years worth of water level data (MHHW)")
fig.set_size_inches(14,8)
# <markdowncell>
# ### Number of stations available by number of years
# <codecell>
fig, ax = plt.subplots(1)
year_list_map = []
for s in station_list:
if "data" in s:
years = s["data"].keys()
year_list_map.append(len(years))
ppl.hist(ax,np.array(year_list_map), grid='y')
plt.plot([num_years_required, num_years_required], [0, 8], 'r-', lw=2)
ax.set_ylabel("Number of Stations")
ax.set_xlabel("Number of Years Available")
ax.set_title("Number of available stations vs available years\n(for bounding box) - red is minimum requested years")
#
# <markdowncell>
# ### Get Model Data: uses the netCDF4 library to get the model data (<b>Still in Development</b>)
# #### Obtains the model data from a given DAP URL, for a given location
# #### TODO: Temporal extraction based on temporal constraints
# <codecell>
#### IJ GRID READER
#use the simple grid to find the data requested
#lat_var,lon_var are pointers to the data
def find_closest_pts_ij(lat_var,lon_var,f_lat,f_lon):
x = lat_var[:]
y = lon_var[:]
dist = -1
xidx = -1
yidx = -1
for i in range(0,len(x)):
for j in range(0,len(y)):
distance = Point(x[i],y[j]).distance(Point(f_lat,f_lon))
if dist == -1:
dist = distance
xidx = i
yidx = j
elif distance < dist:
dist = distance
xidx = i
yidx = j
lat = x[xidx]
lon = y[yidx]
#lat lon index of point
vals = [lat,lon,xidx,yidx]
return vals
#### NCELL GRID READER
#use the simple grid to find the data requested
#lat_var,lon_var are pointers to the data
def find_closest_pts_ncell(map1,lat_var,lon_var,f_lat,f_lon,spacing):
x = lat_var[::spacing]
y = lon_var[::spacing]
idx = get_dist(x,y,f_lat,f_lon,'#666699','#666699',map1,False)
#find the idx that is closest
print spacing," :index: ",idx
idx = idx[0]*spacing
st_idx = idx-(2*spacing)
ed_idx = idx+(2*spacing)
x = lat_var[st_idx:ed_idx]
y = lon_var[st_idx:ed_idx]
ret = get_dist(x,y,f_lat,f_lon,'#00FFFF','#33CCFF',map1,False)
lat = x[ret[0]]
lon = y[ret[0]]
#lat, lon, index of point, distance between points
vals = [lat,lon,ret[0],ret[1]]
return vals
def get_dist(x,y,f_lat,f_lon,color1,color2,map1,show_pts):
dist = -1
idx = -1
for i in range(0,len(x)):
distance = Point(x[i],y[i]).distance(Point(f_lat,f_lon))
if dist == -1:
dist = distance
idx = i
elif distance < dist:
dist = distance
idx = i
if show_pts:
map1.circle_marker(location=[x[i], y[i]], radius=500,popup="idx:"+str(i), line_color=color2,fill_color=color1, fill_opacity=0.3)
return [idx,dist]
#### VERIFIES THAT THE GRID IS VALID
def check_grid_is_valid(time_var,lat_var,lon_var,interest_var):
grid_type = None
# there is data with the fields of interest, now lets check the fields for validity
valid_grid = False
#they are both the same length
if len(lon_var.shape) == len(lat_var.shape):
if lon_var.shape[0] == lat_var.shape[0]:
#both the same size
#print "gridded data..."
valid_grid = True
else:
#both different, possibly meaning i,j grid field
#print "gridded data..."
valid_grid = True
else:
print "shapes are different?...moving on..."
valid_grid = False
if valid_grid:
#find out what the grid is structured
if (len(interest_var.dimensions) == 2) and (interest_var.dimensions[0] == "time") and (interest_var.dimensions[1] == "node"):
#ncell
grid_type = "ncell"
pass
elif (len(interest_var.dimensions) == 3) and (interest_var.dimensions[0] == "time") and (interest_var.dimensions[1] == "lat") and (interest_var.dimensions[2] == "lon"):
#ij
grid_type = "ij"
pass
else:
#make sure it stays none
grid_type = None
if grid_type is not None:
#can be used to print some info
#print "dims: ",interest_var.dimensions
#print "lat: ", lat_var.shape
#print "lon: ", lon_var.shape
pass
return grid_type
def is_model_in_time_range(time_var):
return True
# use only data where the standard deviation of the time series exceeds 0.01 m (1 cm)
# this eliminates flat line model time series that come from land points that
# should have had missing values.
# min_var_value = 0.01
def data_min_value_met(min_var_value,data):
std_value = np.std(data)
if np.isinf(std_value):
print "... value is inf"
return False
if np.isnan(std_value):
print "... value is nan"
return False
if np.amax(data) < min_var_value:
print "...max value to low"
return False
if np.amax(data) >999:
print "...max value to high"
return False
if std_value > min_var_value:
return True
else:
print "...std value to low"
return False
return False
def get_model_data(map1,dap_urls,st_lat,st_lon,start_dt,end_dt,name_list):
# use only data within 0.04 degrees (about 4 km)
max_dist=0.04
min_var_value = 0.01
# set the lat,lon and time fields
lon_list =["degrees_east"]
lat_list = ["degrees_north"]
time_list = ["time"]
model_data_store = []
for url in dap_urls:
try:
#open the url
nc = netCDF4.Dataset(url, 'r')
#get the list of variables
lon_var = None
lat_var = None
time_var = None
interest_var = None
#get the var
var_list = nc.variables.keys()
for var in var_list:
v = nc.variables[var]
try:
#lon
if (v.units in lon_list or v.long_name in lon_list) and "zonal" not in v.long_name:
lon_var = v
#lat
elif (v.units in lat_list or v.long_name in lat_list) and "zonal" not in v.long_name:
lat_var = v
#make sure there is time in there
elif v.long_name in time_list or v.standard_name in time_list:
time_var = v
#get the data of interest
elif v.long_name in name_list or v.standard_name in name_list:
interest_var = v
#it was something else we don't know or care about
else:
pass
except Exception, e:
#print "\t", e
pass
#is time in range?
if is_model_in_time_range(time_var):
#all the variables should be set
if (lon_var is None) and (lat_var is None) and (time_var is None) and (interest_var is None):
pass
else:
#check the grid is valid and of a type
grid_type = check_grid_is_valid(time_var,lat_var,lon_var,interest_var)
try:
if grid_type == "ncell":
#
#usually ncell grids are massive so lets slice the grid
#
print "processing the grid..."
spacing = 10
'''
The distance here is the Euclidean (straight-line) distance between two points
on a plane, in degrees, not the great-circle distance between two points on a
sphere. TODO: convert dist to metres -- a hedged haversine sketch is provided
after get_model_data below.
see (http://gis.stackexchange.com/questions/80881/what-is-the-unit-the-shapely-length-attribute)
'''
# vals = [lat, lon, index of point, distance between points]
vals = find_closest_pts_ncell(map1,lat_var,lon_var,st_lat,st_lon,spacing)
if vals[3] < 1:
#if the dist to the cell is small enough (note: the threshold here is 1 degree, not the max_dist defined above)
time_vals = time_var[:]
data = interest_var[:,vals[2]]
data = np.array(data)
bool_a = data_min_value_met(min_var_value,data)
print bool_a
if bool_a:
#add a marker
map1.circle_marker(location=[vals[0], vals[1]], radius=500,popup="dist:"+str(vals[3]), line_color='#33CC33',fill_color='#00FF00', fill_opacity=0.6)
print vals
print url
print "distance To Station:",vals[3]
print "num time values:",len(time_vals)
print "units: ",interest_var.units
x = np.arange(len(time_vals))
plt.figure()
plt.plot(x, data)
plt.title('Water Level');
plt.xlabel('time index')
plt.ylabel(interest_var.units)
#set maxs
plt.ylim([np.amin(data),np.amax(data)])
plt.show()
print "---------------------"
pass
elif grid_type == "ij":
#
# IJ
#
pass
except Exception, e:
print e
else:
print "model not in time range..."
#something went wrong trying to access the grids
except RuntimeError, e:
print "possible connection error for url"
pass
except:
pass
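# <markdowncell>
# The distances above are plain Euclidean distances in degrees (see the note inside
# `get_model_data`). A hedged great-circle alternative is sketched below; it is an addition to the
# original notebook and is not wired into the grid readers.
# <codecell>
import math
def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two (lat, lon) points given in degrees."""
    r = 6371.0  # mean Earth radius in km
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2.0) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2.0) ** 2
    return 2.0 * r * math.asin(math.sqrt(a))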
def inline_map(map1):
map1._build_map()
return HTML('<iframe srcdoc="{srcdoc}" style="width: 95%; height: 550px; border: none"></iframe>'.format(srcdoc=map1.HTML.replace('"', '"')))
pt_lat = 41.501
pt_lon = -71
map1 = folium.Map(location=[pt_lat, pt_lon], zoom_start=9)
map1.simple_marker([pt_lat, pt_lon],popup="")
#EXAMPLE get model data for a station
start_time = dt.datetime(2008, 9, 10, 5, 1, 1)
end_time = dt.datetime(2008, 9, 11, 5, 1, 1)
sample_data = get_model_data(map1,dap_urls,pt_lat,pt_lon,start_time,end_time,data_dict["water"]["names"])
# <markdowncell>
# #### Show model results on a map
# <codecell>
inline_map(map1)
# <headingcell level=3>
# Extreme Value Analysis:
# <codecell>
# Show the whole color range
for s in station_list:
if "data" in s:
years = s["data"].keys()
#only show the stations with enough data
if len(s["data"].keys()) >= num_years_required:
xx = []
yx = []
for y in years:
xx.append(int(y))
val = s["data"][y]["max"]
yx.append(val)
break
# <codecell>
annual_max_levels = yx
# <headingcell level=4>
# Fit data to GEV distribution
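# <markdowncell>
# (Added note) In the usual parameterisation the GEV density with location $\mu$, scale $\sigma$
# and shape $\xi$ is
#
# $$f(x) = \frac{1}{\sigma}\, t(x)^{\xi+1} e^{-t(x)}, \qquad t(x) = \left[1 + \xi\,\frac{x-\mu}{\sigma}\right]^{-1/\xi} \quad (\xi \neq 0),$$
#
# reducing to the Gumbel density as $\xi \to 0$. Note that scipy's `genextreme` uses a shape
# parameter `c` with the opposite sign convention, $c = -\xi$.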
# <codecell>
def sea_levels_gev_pdf(x):
return genextreme.pdf(x, xi, loc=mu, scale=sigma)
# <codecell>
mle = genextreme.fit(sorted(annual_max_levels), 0)
mu = mle[1]
sigma = mle[2]
xi = mle[0]
print "The mean, sigma, and shape parameters are %s, %s, and %s, resp." % (mu, sigma, xi)
# <headingcell level=4>
# Probability Density Plot
# <codecell>
min_x = min(annual_max_levels)-0.5
max_x = max(annual_max_levels)+0.5
x = np.linspace(min_x, max_x, num=100)
y = [sea_levels_gev_pdf(z) for z in x]
fig = plt.figure(figsize=(12,6))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
xlabel = (s["long_name"][0] + " - Annual max water level (m)")
axes.set_title("Probability Density & Normalized Histogram")
axes.set_xlabel(xlabel)
axes.plot(x, y, color='Red')
axes.hist(annual_max_levels, bins=arange(min_x, max_x, abs((max_x-min_x)/10)), normed=1, color='Yellow')
#
# <headingcell level=4>
# Return Value Plot
# <markdowncell>
# This plot should match NOAA's [Annual Exceedance Probability Curves for station 8449130](http://tidesandcurrents.noaa.gov/est/curves.shtml?stnid=8449130)
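# <markdowncell>
# (Added note) The $T$-year return level $z_T$ is the level exceeded on average once every $T$
# years, i.e. $P(X > z_T) = 1/T$, so $z_T = F^{-1}(1 - 1/T)$ for the fitted annual-maximum
# distribution $F$. The `genextreme.isf(1./T, ...)` call in the cell below computes exactly this
# inverse survival function (note that the cell passes a shape of 0, i.e. the Gumbel limit, rather
# than the fitted $\xi$).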
# <codecell>
noaa_station_id = 8449130
Image(url='http://tidesandcurrents.noaa.gov/est/curves/high/'+str(noaa_station_id)+'.png')
# <codecell>
Image(url='http://tidesandcurrents.noaa.gov/est/images/color_legend.png')
# <markdowncell>
# <script type="text/javascript">
# $('div.input').show();
# </script>
# <codecell>
fig = plt.figure(figsize=(20,6))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
T=np.r_[1:250]
sT = genextreme.isf(1./T, 0, mu, sigma)
axes.semilogx(T, sT, 'r')
N=np.r_[1:len(annual_max_levels)+1];
Nmax=max(N);
axes.plot(Nmax/N, sorted(annual_max_levels)[::-1], 'bo')
title = s["long_name"][0]
axes.set_title(title)
axes.set_xlabel('Return Period (yrs)')
axes.set_ylabel('Meters above MHHW')
axes.set_xticklabels([0,1,10,100,1000])
axes.set_xlim([0,260])
axes.set_ylim([0,1.8])
axes.grid(True)
# <markdowncell>
# This plot does not match exactly. NOAA's curves were calculated using the Extremes Toolkit software package in R, whereas this notebook uses scipy. There is a python package based on the Extremes Toolkit called pywafo, but it is experimental and isn't building properly on Mac OS X.
| mit |
avocadoinnocenceproject/farflungfruit | data/tocsv.py | 1 | 2783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import CSV
from netCDF4 import Dataset
import pandas
import datetime
import calendar
filename = "_grib2netcdf-atls01-95e2cf679cd58ee9b4db4dd119a05a8d-uN8nco.nc"
nc = Dataset(filename, 'r', Format='NETCDF4')
# for var in nc.variables:
# print(var)
print nc.variables.keys()
# e = nc.variables['e'][:]
# stl1 = nc.variables['stl1'][:] soil temp 1
# stl2 = nc.variables['stl2'][:] soil temp 2
# swvl2 = nc.variables['swvl2'][:] soil water 2
latitude = nc.variables['latitude'][:]
longitude = nc.variables['longitude'][:]
time = nc.variables['time'][:]
temperature2metres = nc.variables['t2m'][:]
windspeed10metre = nc.variables['si10'][:]
soilwater = nc.variables['swvl1'][:]
sunlighthours = nc.variables['sund'][:]
snowfall = nc.variables['sf'][:]
precipitation = nc.variables['tp'][:]
watervapour = nc.variables['tcwv'][:]
# print(sund.shape)
# long/lat
# time
# Temperatures
# Wind => si10
# Humidity => tcwv
# snowfall => sf
# Sunlight => sund
# Rain => tp
# Soil temp => stl1
headings = "Lat,Lon,Time,Temp,Wind,Soilwater,Sunlight,Snowfall,Precipitation,Watervapour"
content = ""
total = 24 * 181 * 360
count = 0
for timeslice in range(0, 24):  # cover all 24 x 181 x 360 grid points, matching `total` above
for lat in range(0, 181):
for lon in range(0, 360):
line = ""
line += str(latitude[lat]) + ","
line += str((longitude[lon]) - 180) + ","
line += str(time[timeslice]) + ","
line += str(temperature2metres[timeslice][lat][lon]) + ","
line += str(windspeed10metre[timeslice][lat][lon]) + ","
line += str(soilwater[timeslice][lat][lon]) + ","
line += str(sunlighthours[timeslice][lat][lon]) + ","
line += str(snowfall[timeslice][lat][lon]) + ","
line += str(precipitation[timeslice][lat][lon] * 1000) + ","
line += str(watervapour[timeslice][lat][lon]) + "\n"
# print(line)
print str(count) + " of " + str(total)
count += 1
content += line
text_file = open("out.csv", "w")
text_file.write(headings + "\n" + content)
text_file.close()
df = pandas.read_csv("out.csv")
def time_to_datetime(i):
'''Take an integer in units hours since EPOCH and convert to datetime'''
EPOCH = datetime.datetime(1900, 1, 1)
dt = datetime.timedelta(hours=int(i))
return EPOCH + dt
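# For example (added illustration): time_to_datetime(876576) -> datetime.datetime(2000, 1, 1, 0, 0),
# since 2000-01-01 is 36524 days (876576 hours) after the 1900-01-01 epoch.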
# Convert time into actual time and save Month name
df['ActualTime'] = df.Time.apply(time_to_datetime)
df['Month'] = df.ActualTime.apply(lambda x: calendar.month_name[x.month])
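# Aggregate per (Lat, Lon, Month): sum every numeric column, then overwrite Temp with the
# per-group maximum so the output keeps the monthly maximum temperature.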
dfsums = df.groupby(['Lat', 'Lon', 'Month']).sum()
dfmax = df.groupby(['Lat', 'Lon', 'Month']).max()
dfnew = dfsums
dfnew['Temp'] = dfmax['Temp']
keep_columns = [
'Lat',
'Lon',
'Month',
'Temp',
'Sunlight',
'Precipitation',
'Soilwater'
]
dfnew.reset_index(inplace=True)
dfnew = dfnew[keep_columns]
dfnew.to_csv('out.csv', index=False)
| mit |
wdurhamh/statsmodels | examples/python/predict.py | 33 | 1580 |
## Prediction (out of sample)
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
# ## Artificial data
nsample = 50
sig = 0.25
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, np.sin(x1), (x1-5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, 0.5, -0.02]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# ## Estimation
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print(olsres.summary())
# ## In-sample prediction
ypred = olsres.predict(X)
print(ypred)
# ## Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5,25, 10)
Xnew = np.column_stack((x1n, np.sin(x1n), (x1n-5)**2))
Xnew = sm.add_constant(Xnew)
ynewpred = olsres.predict(Xnew) # predict out of sample
print(ynewpred)
# ## Plot comparison
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(x1, y, 'o', label="Data")
ax.plot(x1, y_true, 'b-', label="True")
ax.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), 'r', label="OLS prediction")
ax.legend(loc="best");
### Predicting with Formulas
# Using formulas can make both estimation and prediction a lot easier
from statsmodels.formula.api import ols
data = {"x1" : x1, "y" : y}
res = ols("y ~ x1 + np.sin(x1) + I((x1-5)**2)", data=data).fit()
# We use the `I` to indicate use of the Identity transform. I.e., we don't want any expansion magic from using `**2`
res.params
# Now we only have to pass the single variable and we get the transformed right-hand side variables automatically
res.predict(exog=dict(x1=x1n))
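# As a quick sanity check (an addition to the original example), the formula-based out-of-sample
# predictions should agree with the ndarray-based ones up to floating point, since both fits use
# the same design matrix (constant, x1, sin(x1), (x1-5)**2):
print(np.allclose(res.predict(exog=dict(x1=x1n)), ynewpred))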
| bsd-3-clause |
zzcclp/spark | python/pyspark/ml/clustering.py | 15 | 62447 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
"""
Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_GaussianMixtureParams, self).__init__(*args)
self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("3.0.0")
def gaussians(self):
"""
Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i
"""
sc = SparkContext._active_spark_context
jgaussians = self._java_obj.gaussians()
return [
MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
for jgaussian in jgaussians]
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@since("3.0.0")
def predictProbability(self, value):
"""
Predict probability for the given features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. versionadded:: 2.0.0
Notes
-----
For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> gm.getMaxIter()
100
>>> gm.setMaxIter(30)
GaussianMixture...
>>> gm.getMaxIter()
30
>>> model = gm.fit(df)
>>> model.getAggregationDepth()
2
>>> model.getFeaturesCol()
'features'
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
>>> model.predict(df.head().features)
2
>>> model.predictProbability(df.head().features)
DenseVector([0.0, 0.0, 1.0])
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> weights = model.weights
>>> len(weights)
3
>>> gaussians = model.gaussians
>>> len(gaussians)
3
>>> gaussians[0].mean
DenseVector([0.825, 0.8675])
>>> gaussians[0].cov
DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
>>> gaussians[1].mean
DenseVector([-0.87, -0.72])
>>> gaussians[1].cov
DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
>>> gaussians[2].mean
DenseVector([-0.055, -0.075])
>>> gaussians[2].cov
DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[4].newPrediction == rows[5].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussians[0].mean == model.gaussians[0].mean
True
>>> model2.gaussians[0].cov == model.gaussians[0].cov
True
>>> model2.gaussians[1].mean == model.gaussians[1].mean
True
>>> model2.gaussians[1].cov == model.gaussians[1].cov
True
>>> model2.gaussians[2].mean == model.gaussians[2].mean
True
>>> model2.gaussians[2].cov == model.gaussians[2].cov
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
>>> gm2.setWeightCol("weight")
GaussianMixture...
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
"""
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`KMeans` and :py:class:`KMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_KMeansParams, self).__init__(*args)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> kmeans = KMeans(k=2)
>>> kmeans.setSeed(1)
KMeans...
>>> kmeans.setWeightCol("weighCol")
KMeans...
>>> kmeans.setMaxIter(10)
KMeans...
>>> kmeans.getMaxIter()
10
>>> kmeans.clear(kmeans.maxIter)
>>> model = kmeans.fit(df)
>>> model.getDistanceMeasure()
'euclidean'
>>> model.setPredictionCol("newPrediction")
KMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("1.5.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.5.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.5.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.5.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_BisectingKMeansParams, self).__init__(*args)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
.. deprecated:: 3.0.0
It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", FutureWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.setWeightCol("weighCol")
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
"""
Params for :py:class:`LDA` and :py:class:`LDAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
def __init__(self, *args):
super(_LDAParams, self).__init__(*args)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
"""
Latent Dirichlet Allocation (LDA) model.
This abstraction allows for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
.. warning:: If this model is actually a :py:class:`DistributedLDAModel`
instance produced by the Expectation-Maximization ("em") `optimizer`,
then this method could involve collecting a large amount of data
to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
.. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes
-----
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. versionadded:: 2.0.0
Returns
-------
list
List of checkpoint files from training
Notes
-----
Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> lda.setMaxIter(10)
LDA...
>>> lda.getMaxIter()
10
>>> lda.clear(lda.maxIter)
>>> model = lda.fit(df)
>>> model.setSeed(1)
DistributedLDAModel...
>>> model.getTopicDistributionCol()
'topicDistribution'
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
>>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
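        # The EM optimizer produces a DistributedLDAModel (topics plus the full
        # training data); the online optimizer produces a lightweight
        # LocalLDAModel, as described in the model docstrings above.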
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.
Examples
--------
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
Examples
--------
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
Examples
--------
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
Examples
--------
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
Examples
--------
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
Examples
--------
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
Examples
--------
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
Examples
--------
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
Examples
--------
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
"""
Params for :py:class:`PowerIterationClustering`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_PowerIterationClusteringParams, self).__init__(*args)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters`
    method to run the PowerIterationClustering algorithm.
.. versionadded:: 2.4.0
Notes
-----
See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_
Examples
--------
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, weightCol="weight")
>>> pic.setMaxIter(40)
PowerIterationClustering...
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
>>> pic2.assignClusters(df).take(6) == assignments.take(6)
True
"""
@keyword_only
def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.4.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.4.0")
def assignClusters(self, dataset):
"""
        Run the PIC algorithm and return a cluster assignment for each input vertex.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
the dst column value is j, the weight column value is similarity s,,ij,,
which must be nonnegative. This is a symmetric matrix and hence
s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
ignored, because we assume s,,ij,, = 0.0.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
henridwyer/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
danielballan/mpld3 | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 22 | 7924 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
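    # Dispatch helper: classes, modules and callables get the specialised
    # Sphinx wrappers; anything else falls back to SphinxObjDoc built from
    # pydoc.getdoc(obj).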
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
Petr-By/qtpyvis | qtgui/widgets/sound.py | 1 | 18478 | """
"""
# standard imports
from typing import Tuple
import time
# Qt imports
from PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QPushButton, QVBoxLayout, QApplication,
QHBoxLayout, QRadioButton, QStyle, QScrollBar,
QCheckBox)
from PyQt5.QtGui import (QPainter, QPainterPath, QPen,
QMouseEvent, QWheelEvent, QResizeEvent, QPaintEvent)
# toolbox imports
from dltb.base.sound import (Sound, SoundPlayer, SoundRecorder,
SoundDisplay)
from dltb.util.time import time_str
from ..utils import QObserver, protect, QThreadedUpdate, pyqtThreadedUpdate
class QSoundViewer(QThreadedUpdate, QObserver, SoundDisplay,
qobservables={
SoundPlayer: {'state_changed', 'position_changed'}}):
"""A Qt-based graphical widget that allows to display sound.
"""
MODE_PLAYING = 1
MODE_RECORDING = 2
MINIMUM_VIEW_LENGTH = 1.0 # one second
viewChanged = pyqtSignal()
def __init__(self, sound: Sound = None, player: SoundPlayer = None,
**kwargs) -> None:
super().__init__(**kwargs)
print(self.__class__.__mro__)
self._sound = sound
self._player = player
self._mode = self.MODE_PLAYING
self._position = None
self._lastUpdateTime = 0.
        self._refresh = .1 # refresh rate in seconds (None = immediate)
self._path = None
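        # The visible window and the selection are (start, end) pairs in seconds.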
self._view = (0.0, 1.0 if sound is None else sound.duration)
self._selection = None
def length(self) -> float:
return 0. if self._sound is None else self._sound.duration
def position(self) -> float:
return self._position
def setPosition(self, position: float) -> None:
if position != self._position:
self._position = position
self.update()
def selection(self) -> Tuple[float, float]:
return self._selection
def setSelection(self, start: float, end: float) -> None:
if (start, end) != self._selection:
self._selection = (start, end)
self.update()
def view(self) -> Tuple[float, float]:
return self._view
def setView(self, start: float = None, end: float = None) -> None:
"""Set the view shown by this :py:class:`QSoundViewer`.
Arguments
---------
start: float
The start time of the view (in seconds).
end: float
The end time of the view (in seconds).
"""
if start is None or start < 0:
start = 0
if end is None or end > self._sound.duration:
end = self._sound.duration
if end-start < self.MINIMUM_VIEW_LENGTH:
end = start + self.MINIMUM_VIEW_LENGTH
if self._view != (start, end):
self._view = (start, end)
self._path = None
self.update()
self.viewChanged.emit()
def mode(self) -> int:
return self._mode
def setMode(self, mode: int) -> None:
self._mode = mode
self.update()
@protect
def resizeEvent(self, event: QResizeEvent) -> None:
"""Process resize events. Resizing the widget requires
an update of the wave displayed.
Parameters
----------
event: QResizeEvent
"""
# FIXME[bug]: resizing the window during playback causes a crash:
#
# src/hostapi/alsa/pa_linux_alsa.c:3636:
# PaAlsaStreamComponent_BeginPolling:
# Assertion `ret == self->nfds' failed.
#
# This is probably due to an empty buffer which may be caused by
# the GUI repainting of the path takes too much time and slows
# down the buffer filling.
#
# No: it actually seems that painting a path with too many points
# is taking to much time, even if the path is precomputed. So
# what we actually should do is limit the number of points
# (and hence the level of detail) to be displayed, at least
# during playback.
# Other options:
# - only repaint relevant parts of the wave curve (not an option
# on resize)
# - increase the buffer size / number of buffers or priority
# for sound playback
self.updatePath()
@pyqtThreadedUpdate
def updatePath(self) -> None:
MAXIMAL_SAMPLE_POINTS = 200 # check when playback breaks
width = min(self.width(), MAXIMAL_SAMPLE_POINTS)
x_ratio = self.width()/width
height = self.height()
#text_height = self.fontMetrics().height()
text_height = 20
level = self._sound.level(width, start=self._view[0],
end=self._view[1])
level = (1-2*level) * (height - text_height)
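        # Map the (presumably 0..1) level values to widget y coordinates, with
        # louder samples drawn nearer the top and room left for the time labels.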
path = QPainterPath()
iterator = enumerate(level)
path.moveTo(*next(iterator))
for (x,y) in iterator:
path.lineTo(x*x_ratio, y)
self._path = path
self.update()
@protect
def paintEvent(self, event: QPaintEvent) -> None:
"""Process the paint event by repainting this Widget.
Parameters
----------
event: QPaintEvent
"""
if self._sound is None:
return
# FIXME[bug?]: this methods seems to be invoked quite often
# - check if this is so and why!
painter = QPainter()
painter.begin(self)
#transform = QTransform()
#transform.translate(x, y)
#transform.scale(w_ratio, h_ratio)
#painter.setTransform(transform)
pen_width = 2 # 1
pen_color = Qt.blue # Qt.green
pen = QPen(pen_color)
pen.setWidth(pen_width)
painter.setPen(pen)
if self._mode == self.MODE_PLAYING:
self._paintSoundLevel(painter)
elif self._mode == self.MODE_RECORDING:
self._paintSoundRecording(painter)
# polygon = QPolygonF(map(lambda p: QPointF(*p), enumerate(wave)))
# painter.drawPolyline(polygon)
#
painter.end()
def _paintSoundLevel(self, painter: QPainter) -> None:
"""Paint wave (power)
"""
fontMetrics = painter.fontMetrics()
pen = painter.pen()
width = self.width()
height = self.height()
text_height = fontMetrics.height()
# write times
pen.setColor(Qt.black)
start_string = time_str(self._view[0])
end_string = time_str(self._view[1])
painter.drawText(0, height, start_string)
painter.drawText(width - fontMetrics.width(end_string),
height, end_string)
# draw sound wave
if self._path is not None:
painter.drawPath(self._path)
# draw position indicator
if self._player is not None:
position = self._player.position
if position is not None:
x_position = int(((position - self._view[0]) /
(self._view[1] - self._view[0])) * width)
if 0 <= x_position <= width:
# draw vertical line
painter.setPen(QPen(Qt.red, 1))
painter.drawLine(x_position, 0, x_position, height)
# write time
position_string = time_str(self._player.position)
text_width = fontMetrics.width(position_string)
x_location = max(0, min(x_position - text_width // 2,
width - text_width))
painter.drawText(x_location, text_height, position_string)
def _paintSoundRecording(self, painter: QPainter,
downsample: int = 10) -> None:
points = self.width()
samplerate = self._sound.samplerate / downsample
duration = points / samplerate
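        # One horizontal pixel per (downsampled) sample, so the window drawn
        # here covers roughly width / samplerate seconds of the recording.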
if self._position is None:
start = max(0, self._sound.duration - duration)
else:
start = self._position
end = min(start + duration, self._sound.duration)
# get the sound wave
wave = self._sound[start:end:samplerate]
if len(wave) > 0:
wave = (wave[:, 0] + 1.0) * (0.5*self.height())
path = QPainterPath()
path.moveTo(0, wave[0])
for p in enumerate(wave):
path.lineTo(*p)
painter.drawPath(path)
def set_sound(self, sound: Sound) -> None:
self._sound = sound
self.update()
# @protect
def mousePressEvent(self, event: QMouseEvent) -> None:
"""A mouse press toggles between raw and processed mode.
"""
if self._player is not None:
position = (self._view[0] + (event.x() / self.width()) *
(self._view[1]-self._view[0]))
self._player.position = position
self.update()
def FIXME_demo_animation_loop(self):
"""This function does not anything useful - it is just meant as a
demonstration how an animation loop in PyQt5 could be realized.
"""
while True: # need some stop criterion ...
# do something (update data)
self.update() # initiate update of display (i.e., repaint)
QApplication.processEvents() # start the actual repainting
time.sleep(0.0025) # wait a bit
def wheelEvent(self, event: QWheelEvent):
"""Process mouse wheel events. The mouse wheel can be used for
zooming.
Parameters
----------
event: QWheelEvent
The event providing the angle delta.
"""
delta = event.angleDelta().y() / 120 # will be +/- 1
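        # Zoom around the time under the cursor: the distance from that point
        # to each view edge is rescaled by about one percent per wheel step.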
center = (self._view[0] + (event.x() / self.width() *
(self._view[1] - self._view[0])))
end = center - (center - self._view[1]) / (1 + delta * 0.01)
start = center - (center - self._view[0]) / (1 + delta * 0.01)
self.setView(start, end)
# We will accept the event, to prevent interference
# with the QScrollArea.
event.accept()
#
# Plotter interface
#
def start_plot(self) -> None:
pass
def stop_plot(self) -> None:
pass
def player_changed(self, player: SoundPlayer,
info: SoundPlayer.Change) -> None:
if info.position_changed:
self._position = player.position
currentTime = time.time()
if (self._refresh is None or
currentTime - self._lastUpdateTime > self._refresh):
# update if enough time has passed since last refresh
self._lastUpdateTime = currentTime
self.update()
class QSoundViewerScrollbar(QScrollBar):
def __init__(self, soundViewer: QSoundViewer, **kwargs) -> None:
super().__init__(Qt.Horizontal, **kwargs)
self._soundViewer = None
self.setSoundViewer(soundViewer)
self.valueChanged.connect(self.onValueChanged)
def setSoundViewer(self, soundViewer: QSoundViewer) -> None:
if self._soundViewer == soundViewer:
return # nothing changed
if self._soundViewer is not None:
self._soundViewer.viewChanged.disconnect(self.onViewChanged)
self._soundViewer = soundViewer
if self._soundViewer is not None:
self._adaptSlider()
self._soundViewer.viewChanged.connect(self.onViewChanged)
def _adaptSlider(self) -> None:
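        # The scrollbar works in integer milliseconds: the page step spans the
        # visible view and the maximum is the rest of the sound's duration.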
length = self._soundViewer.length()
view = self._soundViewer.view()
view_length = view[1] - view[0]
maximum = length-view_length
self.setMaximum(int(1000 * maximum))
self.setPageStep(int(1000 * view_length))
self.setSliderPosition(int(1000 * view[0]))
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.drawText(self.width()-100, 10, "Hallo")
painter.end()
super().paintEvent(event)
painter = QPainter()
painter.begin(self)
painter.drawText(20, 10, "Hallo")
painter.end()
@pyqtSlot()
def onViewChanged(self) -> None:
self._adaptSlider()
@pyqtSlot(int)
def onValueChanged(self, value: int) -> None:
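        # Convert the integer-millisecond scrollbar value back to seconds and
        # shift the viewer's visible window to start there.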
position = value / 1000
length = self.pageStep() / 1000
self._soundViewer.setView(position, position+length)
class QSoundControl(QWidget, QObserver, qobservables={
SoundPlayer: {'state_changed'}}):
"""A Qt-based graphical widget that allows to control playback
and recording of sounds.
"""
def __init__(self, sound: Sound,
player: SoundPlayer = None,
recorder: SoundRecorder = None,
**kwargs) -> None:
super().__init__(**kwargs)
self._sound = sound
self._player = player
self.observe(player)
self._recorder = recorder
self.observe(recorder)
style = self.style()
self._iconPlay = style.standardIcon(getattr(QStyle, 'SP_MediaPlay'))
self._iconPause = style.standardIcon(getattr(QStyle, 'SP_MediaPause'))
layout = QVBoxLayout()
self._buttonRecord = QPushButton("Record")
self._buttonRecord.setCheckable(True)
self._buttonRecord.clicked.connect(self._onButtonRecordClicked)
layout.addWidget(self._buttonRecord)
self._buttonPlay = QPushButton("Play")
self._buttonPlay.setCheckable(True)
self._buttonPlay.clicked.connect(self._onButtonPlayClicked)
self._buttonPlay.setIcon(self._iconPlay)
layout.addWidget(self._buttonPlay)
self._buttonInfo = QPushButton("Info")
self._buttonInfo.clicked.connect(self._onButtonInfoClicked)
layout.addWidget(self._buttonInfo)
self._soundViewer = QSoundViewer(self._sound, self._player)
self._soundViewer.setMinimumSize(200, 200)
self._soundViewer.observe(self._player)
layout.addWidget(self._soundViewer)
layout.addWidget(QSoundViewerScrollbar(self._soundViewer))
# self._matplotlib = QMatplotlib()
# layout.addWidget(self._matplotlib)
self._plotter = self._soundViewer
# self._plotter = None
#self._plotter = MatplotlibSoundPlotter(samplerate=sound.samplerate,
# standalone=False,
# figure=self._matplotlib.figure,
# ax=self._matplotlib._ax)
radioLayout = QHBoxLayout()
self.b1 = QRadioButton("Playing")
self.b1.setChecked(True)
self.b1.toggled.connect(lambda: self.btnstate(self.b1))
radioLayout.addWidget(self.b1)
self.b2 = QRadioButton("Recording")
self.b2.toggled.connect(lambda: self.btnstate(self.b2))
radioLayout.addWidget(self.b2)
self._checkboxLoop = QCheckBox("Loop")
self._checkboxLoop.stateChanged.connect(self._onLoopChanged)
radioLayout.addWidget(self._checkboxLoop)
self._checkboxReverse = QCheckBox("Reverse")
self._checkboxReverse.stateChanged.connect(self._onReverseChanged)
radioLayout.addWidget(self._checkboxReverse)
layout.addLayout(radioLayout)
self.setLayout(layout)
def btnstate(self, b):
if b == self.b1:
self._soundViewer.setMode(self._soundViewer.MODE_PLAYING)
if b == self.b2:
self._soundViewer.setMode(self._soundViewer.MODE_RECORDING)
@pyqtSlot(bool)
# @protect
def _onButtonRecordClicked(self, checked: bool) -> None:
if self._recorder is None:
print("QSoundControl: No recorder, sorry!")
elif checked:
print("QSoundControl: Recording sound")
            self._recorder.record(self._sound)
else:
print("QSoundControl: Stop recording sound")
self._recorder.stop()
@pyqtSlot(bool)
# @protect
def _onButtonPlayClicked(self, checked: bool) -> None:
if self._player is None:
print("QSoundControl: No player, sorry!")
elif checked:
print("QSoundControl: Playing sound")
self._player.play(self._sound)
else:
print("QSoundControl: Stop playing sound")
self._player.stop()
@pyqtSlot(bool)
# @protect
def _onButtonInfoClicked(self, checked: bool) -> None:
print(f"info[QSoundControl]: Sound: {self._sound}")
@pyqtSlot(int)
# @protect
def _onLoopChanged(self, state: int) -> None:
if self._player is not None:
self._player.loop = (state == Qt.Checked)
@pyqtSlot(int)
# @protect
def _onReverseChanged(self, state: int) -> None:
if self._player is not None:
self._player.reverse = (state == Qt.Checked)
def player_changed(self, player: SoundPlayer,
info: SoundPlayer.Change) -> None:
if info.state_changed:
self.update()
def recorder_changed(self, player: SoundRecorder,
info: SoundRecorder.Change) -> None:
if info.state_changed:
self.update()
def update(self) -> None:
print(self._player is not None, self._player.sound)
self._buttonPlay.setEnabled(self._player is not None and
self._player.sound is not None)
self._buttonPlay.setChecked(self._player is not None and
self._player.playing)
self._buttonRecord.setChecked(self._recorder is not None and
self._recorder.recording)
self._checkboxLoop.setEnabled(self._player is not None)
self._checkboxLoop.setCheckState(Qt.Checked if
self._player is not None and
self._player.loop else Qt.Unchecked)
self._checkboxReverse.setEnabled(self._player is not None)
self._checkboxReverse.setCheckState(Qt.Checked if
self._player is not None and
self._player.reverse else
Qt.Unchecked)
super().update()
| mit |
glouppe/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
fyffyt/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
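        # Latency is the mean wall-clock time of repeated predict() calls on
        # the held-out test split.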
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
bitemyapp/ggplot | ggplot/tests/test_basic.py | 12 | 9308 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)
})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
df['z'] = df['y'] + 100
df['c'] = np.where(df.x%2==0,"red", "blue")
return df
def _build_meat_df():
meat['date'] = pd.to_datetime(meat.date)
return meat
@cleanup
def test_geom_density():
df = _build_testing_df()
gg = ggplot(aes(x="x", color="c"), data=df)
gg = gg + geom_density() + xlab("x label") + ylab("y label")
assert_same_ggplot(gg, "geom_density")
@cleanup
def test_geom_histogram():
df = _build_testing_df()
# TODO: use fill aesthetic for a better test
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_histogram(), "geom_hist")
assert_same_ggplot(gg + geom_histogram() + ggtitle("My Histogram"), "geom_hist_title")
@cleanup
def test_geom_point():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_point(), "geom_point")
gg = gg + geom_point() + geom_vline(xintercept=50, ymin=-1.5, ymax=1.5)
assert_same_ggplot(gg, "geom_point_vline")
@cleanup
def test_geom_area():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z', color="cat2"), data=df)
assert_same_ggplot(gg + geom_area(), "geom_area")
@cleanup
def test_geom_text():
gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
assert_same_ggplot(gg, "geom_text")
@cleanup
def test_geom_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line(), "factor_geom_line")
@cleanup
def test_geom_rect():
df = pd.DataFrame({
'xmin':[3, 5, 3, 3, 9, 4, 8, 3, 9, 2, 9, 1, 11, 4, 7, 1],
'xmax':[10, 8, 10, 4, 10, 5, 9, 4, 10, 4, 11, 2, 12, 6, 9, 12],
'ymin':[3, 3, 6, 2, 2, 6, 6, 8, 8, 4, 4, 2, 2, 1, 1, 4],
'ymax':[5, 7, 7, 7, 7, 8, 8, 9, 9, 6, 6, 5, 5, 2, 2, 5]})
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax'))
p += geom_rect(xmin=0, xmax=13, ymin=0, ymax=10)
p += geom_rect(colour="white", fill="white")
p += xlim(0, 13)
assert_same_ggplot(p, "geom_rect_inv")
@cleanup
def test_factor_geom_point():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_point(), "factor_geom_point")
@cleanup
def test_factor_geom_point_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line() + geom_point(), "factor_geom_point_line")
@cleanup
def test_factor_point_line_title_lab():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue') + ggtitle("Beef: It's What's for Dinner")
p = p + xlab("Date") + ylab("Head of Cattle Slaughtered")
assert_same_ggplot(p, "factor_complicated")
@cleanup
def test_labs():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue')
p = p + labs(title="Beef: It's What's for Dinner", x="Date", y="Head of Cattle Slaughtered")
assert_same_ggplot(p, "labs")
@cleanup
def test_factor_bar():
p = ggplot(aes(x='factor(cyl)'), data=mtcars)
assert_same_ggplot(p + geom_histogram(), "factor_geom_bar")
@cleanup
def test_stats_smooth():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", color="cat"), data=df)
gg = gg + stat_smooth(se=False) + ggtitle("My Smoothed Chart")
assert_same_ggplot(gg, "stat_smooth")
@cleanup
def test_stats_bin2d():
import matplotlib.pyplot as plt
if not hasattr(plt, "hist2d"):
import nose
raise nose.SkipTest("stat_bin2d only works with newer matplotlib (1.3) versions.")
df = _build_testing_df()
gg = ggplot(aes(x='x', y='y', shape='cat', color='cat2'), data=df)
assert_same_ggplot(gg + stat_bin2d(), "stat_bin2d")
@cleanup
def test_alpha_density():
gg = ggplot(aes(x='mpg'), data=mtcars)
assert_same_ggplot(gg + geom_density(fill=True, alpha=0.3), "geom_density_alpha")
@cleanup
def test_facet_wrap():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z'), data=df)
#assert_same_ggplot(gg + geom_bar() + facet_wrap(x="cat2"), "geom_bar_facet")
assert_same_ggplot(gg + geom_area() + facet_wrap(x="cat2"), "geom_area_facet")
@cleanup
def test_facet_wrap2():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x='date', y='value', colour='variable'), data=meat_lng)
assert_same_ggplot(p + geom_density(fill=True, alpha=0.3) + facet_wrap("variable"), "geom_density_facet")
assert_same_ggplot(p + geom_line(alpha=0.3) + facet_wrap("variable"), "geom_line_facet")
@cleanup
def test_facet_grid_exceptions():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x="date", y="value", colour="variable", shape="variable"), meat_lng)
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable", x="NOT_AVAILABLE"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="NOT_AVAILABLE", x="variable"))
@cleanup
def test_facet_grid():
# only use a small subset of the data to speedup tests
# N=53940 -> N=7916 and only 2x2 facets
_mask1 = (diamonds.cut == "Ideal") | (diamonds.cut == "Good")
_mask2 = (diamonds.clarity == "SI2") | (diamonds.clarity == "VS1")
_df = diamonds[_mask1 & _mask2]
p = ggplot(aes(x='x', y='y', colour='z'), data=_df)
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_big")
p = ggplot(aes(x='carat'), data=_df)
p = p + geom_density() + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_facet")
@cleanup
def test_smooth_se():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + stat_smooth(), "point_smooth_se")
assert_same_ggplot(p + stat_smooth(), "smooth_se")
@cleanup
def test_scale_xy_continous():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
p = p + geom_point() + scale_x_continuous("This is the X")
p = p + scale_y_continuous("Squared", limits=[0, 1500])
assert_same_ggplot(p, "scale1")
@cleanup
def test_ylim():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + ylim(0, 1500), "ylim")
@cleanup
def test_partial_limits() :
p = ggplot(diamonds, aes('carat', 'price'))
assert_same_ggplot(p + geom_point(alpha=1/20.) + xlim(high = 4) + ylim(0), "partial_limits")
@cleanup
def test_partial_limits_facet() :
p = ggplot(diamonds, aes('carat', 'price', color="clarity"))
p = p + geom_point(alpha=1/20.) + facet_wrap(x="cut", scales="free") + xlim(low=0) + ylim(low=0)
assert_same_ggplot(p, "partial_limits_facet")
@cleanup
def test_scale_date():
meat = _build_meat_df()
gg = ggplot(aes(x='date', y='beef'), data=meat) + geom_line()
assert_same_ggplot(gg+scale_x_date(labels="%Y-%m-%d"), "scale_date")
@cleanup
def test_diamond():
p = ggplot(aes(x='x', y='y', colour='z'), data=diamonds.head(4))
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_wrap("cut")
assert_same_ggplot(p, "diamonds_small")
def test_aes_positional_args():
result = aes("weight", "hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes("weight", "hp", "qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3, expected3)
def test_aes_keyword_args():
result = aes(x="weight", y="hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes(x="weight", y="hp", color="qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3,expected3)
def test_aes_mixed_args():
result = aes("weight", "hp", color="qsec")
expected = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result, expected)
@cleanup
def test_scale_color_brewer() :
p = ggplot(diamonds, aes(x = "x", y="y"))
p = p + geom_line() + scale_color_brewer(type='qual', palette=2)
assert_same_ggplot(p, "scale_color_brewer")
| bsd-2-clause |
cwu2011/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
TheTimmy/spack | var/spack/repos/builtin/packages/cosmomc/package.py | 3 | 7785 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import fnmatch
import os
class Cosmomc(Package):
"""CosmoMC is a Fortran 2008 Markov-Chain Monte-Carlo (MCMC) engine
for exploring cosmological parameter space, together with
Fortran and python code for analysing Monte-Carlo samples and
importance sampling (plus a suite of scripts for building grids
of runs, plotting and presenting results)."""
homepage = "http://cosmologist.info/cosmomc/"
url = "https://github.com/cmbant/CosmoMC/archive/Nov2016.tar.gz"
version('2016.11', '98620cb746352f68fb0c1196e9a070ac')
version('2016.06', '92dc651d1407cca6ea9228992165f5cb')
def url_for_version(self, version):
names = {'2016.11': "Nov2016",
'2016.06': "June2016"}
return ("https://github.com/cmbant/CosmoMC/archive/%s.tar.gz" %
names[str(version)])
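    # For example, version '2016.11' resolves to
    # https://github.com/cmbant/CosmoMC/archive/Nov2016.tar.gz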
variant('mpi', default=True, description='Enable MPI support')
variant('planck', default=False,
description='Enable Planck Likelihood code and baseline data')
variant('python', default=True, description='Enable Python bindings')
extends('python', when='+python')
depends_on('mpi', when='+mpi')
depends_on('planck-likelihood', when='+planck')
depends_on('py-matplotlib', type=('build', 'run'), when='+python')
depends_on('py-numpy', type=('build', 'run'), when='+python')
depends_on('py-pandas', type=('build', 'run'), when='+python')
depends_on('py-scipy', type=('build', 'run'), when='+python')
depends_on('py-six', type=('build', 'run'), when='+python')
depends_on('python @2.7:2.999,3.4:', type=('build', 'run'), when='+python')
patch('Makefile.patch')
patch('errorstop.patch')
parallel = False
def install(self, spec, prefix):
# Clean up environment to avoid configure problems
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
# Set up Planck data if requested
clikdir = join_path('data', 'clik')
try:
os.remove(clikdir)
except OSError:
pass
if '+planck' in spec:
os.symlink(join_path(os.environ['CLIK_DATA'], 'plc_2.0'), clikdir)
else:
os.environ.pop('CLIK_DATA', '')
os.environ.pop('CLIK_PATH', '')
os.environ.pop('CLIK_PLUGIN', '')
# Choose compiler
# Note: Instead of checking the compiler vendor, we should
# rewrite the Makefile to use Spack's options all the time
if spec.satisfies('%gcc'):
if not spec.satisfies('%gcc@6:'):
raise InstallError(
"When using GCC, "
"CosmoMC requires version gcc@6: for building")
choosecomp = 'ifortErr=1' # choose gfortran
elif spec.satisfies('%intel'):
            if not spec.satisfies('%intel@14:'):
raise InstallError(
"When using the Intel compiler, "
"CosmoMC requires version intel@14: for building")
choosecomp = 'ifortErr=0' # choose ifort
else:
raise InstallError("Only GCC and Intel compilers are supported")
# Configure MPI
if '+mpi' in spec:
wantmpi = 'BUILD=MPI'
mpif90 = 'MPIF90C=%s' % spec['mpi'].mpifc
else:
wantmpi = 'BUILD=NOMPI'
mpif90 = 'MPIF90C='
# Choose BLAS and LAPACK
lapack = ("LAPACKL=%s" %
(spec['lapack'].libs + spec['blas'].libs).ld_flags)
# Build
make(choosecomp, wantmpi, mpif90, lapack)
# Install
mkdirp(prefix.bin)
install('cosmomc', prefix.bin)
root = join_path(prefix.share, 'cosmomc')
mkdirp(root)
entries = [
'batch1',
'batch2',
'batch3',
'camb',
'chains',
'clik_latex.paramnames',
'clik_units.paramnames',
'cosmomc.cbp',
'data',
'distgeneric.ini',
'distparams.ini',
'disttest.ini',
'docs',
'job_script',
'job_script_MOAB',
'job_script_SLURM',
'paramnames',
'params_generic.ini',
'planck_covmats',
'scripts',
# don't copy 'source'
'test.ini',
'test_pico.ini',
'test_planck.ini',
'tests',
]
if '+python' in spec:
entries += ['python']
for entry in entries:
if os.path.isfile(entry):
install(entry, root)
else:
install_tree(entry, join_path(root, entry))
for dirpath, dirnames, filenames in os.walk(prefix):
for filename in fnmatch.filter(filenames, '*~'):
os.remove(os.path.join(dirpath, filename))
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
prefix = self.prefix
spec = self.spec
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
os.environ.pop('COSMOMC_LOCATION', '')
os.environ.pop('PLC_LOCATION', '')
os.environ.pop('CLIKPATH', '')
os.environ.pop('PLANCKLIKE', '')
exe = spec['cosmomc'].command.path
args = []
if '+mpi' in spec:
# Add mpirun prefix
args = ['-np', '1', exe]
exe = join_path(spec['mpi'].prefix.bin, 'mpiexec')
cosmomc = Executable(exe)
with working_dir('spack-check', create=True):
for entry in [
'camb',
'chains',
'data',
'paramnames',
'planck_covmats',
]:
os.symlink(join_path(prefix.share, 'cosmomc', entry), entry)
inifile = join_path(prefix.share, 'cosmomc', 'test.ini')
cosmomc(*(args + [inifile]))
if '+planck' in spec:
inifile = join_path(prefix.share, 'cosmomc', 'test_planck.ini')
cosmomc(*(args + [inifile]))
| lgpl-2.1 |
gfyoung/pandas | pandas/tests/frame/methods/test_count.py | 1 | 4682 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameCount:
def test_count_multiindex(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
frame = frame.copy()
frame.index.names = ["a", "b"]
result = frame.count(level="b")
expected = frame.count(level=1)
tm.assert_frame_equal(result, expected, check_names=False)
result = frame.count(level="a")
expected = frame.count(level=0)
tm.assert_frame_equal(result, expected, check_names=False)
msg = "Level x not found"
with pytest.raises(KeyError, match=msg):
frame.count(level="x")
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=range(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=range(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_count_level_corner(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
ser = frame["A"][:0]
result = ser.count(level=0)
expected = Series(0, index=ser.index.levels[0], name="A")
tm.assert_series_equal(result, expected)
df = frame[:0]
result = df.count(level=0)
expected = (
DataFrame(
index=ser.index.levels[0].set_names(["first"]), columns=df.columns
)
.fillna(0)
.astype(np.int64)
)
tm.assert_frame_equal(result, expected)
def test_count_index_with_nan(self):
# https://github.com/pandas-dev/pandas/issues/21824
df = DataFrame(
{
"Person": ["John", "Myla", None, "John", "Myla"],
"Age": [24.0, 5, 21.0, 33, 26],
"Single": [False, True, True, True, False],
}
)
# count on row labels
res = df.set_index(["Person", "Single"]).count(level="Person")
expected = DataFrame(
index=Index(["John", "Myla"], name="Person"),
columns=Index(["Age"]),
data=[2, 2],
)
tm.assert_frame_equal(res, expected)
# count on column labels
res = df.set_index(["Person", "Single"]).T.count(level="Person", axis=1)
expected = DataFrame(
columns=Index(["John", "Myla"], name="Person"),
index=Index(["Age"]),
data=[[2, 2]],
)
tm.assert_frame_equal(res, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby
def test_count_level(
self,
multiindex_year_month_day_dataframe_random_data,
multiindex_dataframe_random_data,
):
ymd = multiindex_year_month_day_dataframe_random_data
frame = multiindex_dataframe_random_data
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype("i8")
tm.assert_frame_equal(result, expected)
frame.iloc[1, [1, 2]] = np.nan
frame.iloc[7, [0, 1]] = np.nan
ymd.iloc[1, [1, 2]] = np.nan
ymd.iloc[7, [0, 1]] = np.nan
_check_counts(frame)
_check_counts(ymd)
_check_counts(frame.T, axis=1)
_check_counts(ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
with pytest.raises(TypeError, match="hierarchical"):
df.count(level=0)
frame["D"] = "foo"
result = frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns, Index(list("ABC"), name="exp"))
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/series/test_replace.py | 9 | 8604 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas._libs.lib as lib
import pandas.util.testing as tm
from .common import TestData
class TestSeriesReplace(TestData):
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
ser = pd.Series(self.ts.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
pytest.raises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
# GH 5797
ser = pd.Series(pd.date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp('20120101')
result = ser.replace({pd.Timestamp('20130103'):
pd.Timestamp('20120101')})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp('20130103'),
pd.Timestamp('20120101'))
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
s.replace([1, 2, 3], inplace=True)
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
with pytest.raises(ValueError):
s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
s = pd.Series(np.arange(5), dtype='int64')
def check_replace(to_rep, val, expected):
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
tm.assert_series_equal(expected, r)
tm.assert_series_equal(expected, sc)
# MUST upcast to float
e = pd.Series([0., 1., 2., 3., 4.])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, 'a'])
tr, v = [3, 4], [3.5, 'a']
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])
tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype='object')
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
result = dr.astype(object).replace(
[dr[0], dr[1], dr[2]], [1.0, 2, 'a'])
expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, '2u')
expected = pd.Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list('abcd'))
tm.assert_series_equal(s, s.replace(dict()))
tm.assert_series_equal(s, s.replace(pd.Series([])))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace('2', np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace(u'2', np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, '4', 4, 5])
result = s.replace([2, '4'], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
| bsd-3-clause |
dsockwell/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
chappers/Data-Science | Think-Stats/3 Cumulative Distribution Functions.py | 1 | 20902 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# This HTML version of is provided for convenience, but it is not the best
# format for the book. In particular, some of the symbols are not rendered
# correctly.
#
# You might prefer to read the [PDF
# version](http://thinkstats.com/thinkstats.pdf), or you can buy a hardcopy
# [here](http://www.lulu.com/product/paperback/think-stats/12443331).
#
# # Chapter 3 Cumulative distribution functions
#
# ## 3.1 The class size paradox
#
# At many American colleges and universities, the student-to-faculty ratio is
# about 10:1. But students are often surprised to discover that their average
# class size is bigger than 10. There are two reasons for the discrepancy:
#
# * Students typically take 4–5 classes per semester, but professors often teach 1 or 2.
# * The number of students who enjoy a small class is small, but the number of students in a large class is (ahem!) large.
#
# The first effect is obvious (at least once it is pointed out); the second is
# more subtle. So let’s look at an example. Suppose that a college offers 65
# classes in a given semester, with the following distribution of sizes:
#
#
#
# size count
# 5- 9 8
# 10-14 8
# 15-19 14
# 20-24 4
# 25-29 6
# 30-34 12
# 35-39 8
# 40-44 3
# 45-49 2
#
#
# If you ask the Dean for the average class size, he would construct a PMF,
# compute the mean, and report that the average class size is 24.
#
# But if you survey a group of students, ask them how many students are in their
# classes, and compute the mean, you would think that the average class size was
# higher.
#
# **Exercise 1** Build a PMF of these data and compute the mean as perceived by the Dean. Since the data have been grouped in bins, you can use the mid-point of each bin.
#
# Now find the distribution of class sizes as perceived by students and compute
# its mean.
#
# Suppose you want to find the distribution of class sizes at a college, but
# you can’t get reliable data from the Dean. An alternative is to choose a
# random sample of students and ask them the number of students in each of their
# classes. Then you could compute the PMF of their responses.
#
# The result would be biased because large classes would be oversampled, but
# you could estimate the actual distribution of class sizes by applying an
# appropriate transformation to the observed distribution.
#
# Write a function called `UnbiasPmf` that takes the PMF of the observed values
# and returns a new Pmf object that estimates the distribution of class sizes.
#
# You can download a solution to this problem from
# `http://thinkstats.com/class_size.py`.
# <codecell>
%matplotlib inline
import pandas as pd
import numpy as np
t = [7]*8 + [12]*8 + [17]*14 + [22]*4 + [27]*6 + [32]*12 + [37]*8 + [42]*3 + [47]*2
print "The mean of the data above is %.2f" % np.mean(t)
pd.DataFrame(t).hist(normed = True)
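# <markdowncell>
# A minimal sketch for the rest of Exercise 1, using a plain dict as a PMF
# instead of the book's `Pmf` class (an assumption made here for brevity).
# `BiasPmf` weights each class size by the number of students who observe it;
# `UnbiasPmf` reverses that transformation.
# <codecell>
def MakePmf(values):
    # plain-dict PMF: value -> probability
    counts = {}
    for v in values:
        counts[v] = counts.get(v, 0) + 1.0
    total = sum(counts.values())
    return {v: c / total for v, c in counts.items()}

def BiasPmf(pmf):
    # a class of size v is reported by v students, so weight by v
    biased = {v: p * v for v, p in pmf.items()}
    total = sum(biased.values())
    return {v: p / total for v, p in biased.items()}

def UnbiasPmf(pmf):
    # invert the size bias of a student-reported distribution
    unbiased = {v: p / v for v, p in pmf.items()}
    total = sum(unbiased.values())
    return {v: p / total for v, p in unbiased.items()}

def PmfMean(pmf):
    return sum(v * p for v, p in pmf.items())

pmf = MakePmf(t)
print "Mean as seen by the Dean:  %.2f" % PmfMean(pmf)
print "Mean as seen by students:  %.2f" % PmfMean(BiasPmf(pmf))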
# <markdowncell>
#
# **Exercise 2**
#
# In most foot races, everyone starts at the same time. If you are a fast
# runner, you usually pass a lot of people at the beginning of the race, but
# after a few miles everyone around you is going at the same speed.
#
# When I ran a long-distance (209 miles) relay race for the first time, I
# noticed an odd phenomenon: when I overtook another runner, I was usually much
# faster, and when another runner overtook me, he was usually much faster.
#
# At first I thought that the distribution of speeds might be bimodal; that is,
# there were many slow runners and many fast runners, but few at my speed.
#
# Then I realized that I was the victim of selection bias. The race was unusual
# in two ways: it used a staggered start, so teams started at different times;
# also, many teams included runners at different levels of ability.
#
# As a result, runners were spread out along the course with little
# relationship between speed and location. When I started running my leg, the
# runners near me were (pretty much) a random sample of the runners in the
# race.
#
# So where does the bias come from? During my time on the course, the chance of
# overtaking a runner, or being overtaken, is proportional to the difference in
# our speeds. To see why, think about the extremes. If another runner is going
# at the same speed as me, neither of us will overtake the other. If someone is
# going so fast that they cover the entire course while I am running, they are
# certain to overtake me.
#
# Write a function called `BiasPmf` that takes a Pmf representing the actual
# distribution of runners’ speeds, and the speed of a running observer, and
# returns a new Pmf representing the distribution of runners’ speeds as seen by
# the observer.
#
# To test your function, get the distribution of speeds from a normal road race
# (not a relay). I wrote a program that reads the results from the James Joyce
# Ramble 10K in Dedham MA and converts the pace of each runner to MPH. Download
# it from `http://thinkstats.com/relay.py`. Run it and look at the PMF of
# speeds.
#
# Now compute the distribution of speeds you would observe if you ran a relay
# race at 7.5 MPH with this group of runners. You can download a solution from
# `http://thinkstats.com/relay_soln.py`
#
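# A sketch for Exercise 2, again with a plain-dict PMF. The exercise calls the
# function `BiasPmf`; the name `ObservedPmf` is used here only to avoid
# clashing with the class-size helper above. Applying it requires the speed
# PMF from `relay.py`, which is not loaded in this notebook.
# <codecell>
def ObservedPmf(pmf, observer_speed):
    # the chance of overtaking (or being overtaken) is proportional to the
    # absolute difference between a runner's speed and the observer's
    biased = {v: p * abs(v - observer_speed) for v, p in pmf.items()}
    total = sum(biased.values())
    return {v: p / total for v, p in biased.items()}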
# <markdowncell>
# ## 3.2 The limits of PMFs
#
# PMFs work well if the number of values is small. But as the number of values
# increases, the probability associated with each value gets smaller and the
# effect of random noise increases.
#
# For example, we might be interested in the distribution of birth weights. In
# the NSFG data, the variable `totalwgt_oz` records weight at birth in ounces.
# Figure 3.1 shows the PMF of these values for first babies and others.
#
# <codecell>
import pandas as pd
import numpy as np
import gzip
def makeRecord(line, fields):
obs = {}
for (field, start, end, cast) in fields:
try:
s = line[start-1:end]
val = cast(s)
except ValueError:
val = np.nan #make use of numpy's nan
obs[field]=val
return obs
fresp = gzip.open('./data/2002FemResp.dat.gz')
resp_fields = [
('caseid', 1, 12, int),
]
fpreg = gzip.open('./data/2002FemPreg.dat.gz')
preg_fields = [ ('caseid', 1, 12, int),
('nbrnaliv', 22, 22, int),
('babysex', 56, 56, int),
('birthwgt_lb', 57, 58, int),
('birthwgt_oz', 59, 60, int),
('prglength', 275, 276, int),
('outcome', 277, 277, int),
('birthord', 278, 279, int),
('agepreg', 284, 287, int),
('finalwgt', 423, 440, float)]
respondents = pd.DataFrame([makeRecord(line, resp_fields) for line in fresp])
pregnancies = pd.DataFrame([makeRecord(line, preg_fields) for line in fpreg])
#recode
pregnancies['agepreg'] = pregnancies.agepreg/100.0
pregnancies['totalwgt_oz'] = pregnancies.birthwgt_lb * 16 + pregnancies.birthwgt_oz
# <codecell>
# add column if they are first born
pregnancies['first_born'] = pregnancies.birthord == 1
pregnancies.query('totalwgt_oz < 250') \
.groupby('first_born')['totalwgt_oz'] \
.hist(bins=100, normed=True)
# <markdowncell>
#
# Overall, these distributions resemble the familiar “bell curve,” with many
# values near the mean and a few values much higher and lower.
#
# But parts of this figure are hard to interpret. There are many spikes and
# valleys, and some apparent differences between the distributions. It is hard
# to tell which of these features are significant. Also, it is hard to see
# overall patterns; for example, which distribution do you think has the higher
# mean?
#
# These problems can be mitigated by binning the data; that is, dividing the
# domain into non-overlapping intervals and counting the number of values in
# each bin. Binning can be useful, but it is tricky to get the size of the bins
# right. If they are big enough to smooth out noise, they might also smooth out
# useful information.
#
# An alternative that avoids these problems is the **cumulative distribution
# function**, or **CDF**. But before we can get to that, we have to talk about
# percentiles.
#
# ## 3.3 Percentiles
#
# If you have taken a standardized test, you probably got your results in the
# form of a raw score and a **percentile rank**. In this context, the percentile
# rank is the fraction of people who scored lower than you (or the same). So if
# you are “in the 90th percentile,” you did as well as or better than 90% of the
# people who took the exam.
#
# Here’s how you could compute the percentile rank of a value, `your_score`,
# relative to the scores in the sequence `scores`:
#
#
#
# def PercentileRank(scores, your_score):
# count = 0
# for score in scores:
# if score <= your_score:
# count += 1
#
# percentile_rank = 100.0 * count / len(scores)
# return percentile_rank
#
#
# For example, if the scores in the sequence were 55, 66, 77, 88 and 99, and you
# got the 88, then your percentile rank would be `100 * 4 / 5` which is 80.
#
# If you are given a value, it is easy to find its percentile rank; going the
# other way is slightly harder. If you are given a percentile rank and you want
# to find the corresponding value, one option is to sort the values and search
# for the one you want:
#
#
# def Percentile(scores, percentile_rank):
# scores.sort()
# for score in scores:
# if PercentileRank(scores, score) >= percentile_rank:
# return score
#
#
# The result of this calculation is a **percentile**. For example, the 50th
# percentile is the value with percentile rank 50. In the distribution of exam
# scores, the 50th percentile is 77.
#
# **Exercise 3** This implementation of `Percentile` is not very efficient. A better approach is to use the percentile rank to compute the index of the corresponding percentile. Write a version of `Percentile` that uses this algorithm.
#
# You can download a solution from `http://thinkstats.com/score_example.py`.
#
# **Exercise 4** Optional: If you only want to compute one percentile, it is not efficient to sort the scores. A better option is the selection algorithm, which you can read about at `http://wikipedia.org/wiki/Selection_algorithm`.
#
# Write (or find) an implementation of the selection algorithm and use it to
# write an efficient version of `Percentile`.
#
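# A sketch of the index-based approach from Exercise 3 (the name `Percentile2`
# and the exact boundary convention are assumptions; the book's solution may
# differ slightly):
# <codecell>
def Percentile2(scores, percentile_rank):
    # sort once, then jump straight to the corresponding index
    scores = sorted(scores)
    index = int(percentile_rank * (len(scores) - 1) / 100.0)
    return scores[index]

print Percentile2([55, 66, 77, 88, 99], 50)  # 77
# <markdowncell>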
# ## 3.4 Cumulative distribution functions
#
# Now that we understand percentiles, we are ready to tackle the cumulative
# distribution function (CDF). The CDF is the function that maps values to their
# percentile rank in a distribution.
#
# The CDF is a function of \\(x\\), where \\(x\\) is any value that might appear in the
# distribution. To evaluate CDF(\\(x\\)) for a particular value of \\(x\\), we compute
# the fraction of the values in the sample less than (or equal to) \\(x\\).
#
# Here’s what that looks like as a function that takes a sample, `t`, and a
# value, `x`:
#
#
#
# def Cdf(t, x):
# count = 0.0
# for value in t:
# if value <= x:
# count += 1.0
#
# prob = count / len(t)
# return prob
#
#
# This function should look familiar; it is almost identical to
# `PercentileRank`, except that the result is a probability in the range `[0–1]`
# rather than a percentile rank in the range `[0–100]`.
#
# As an example, suppose a sample has the values `{1, 2, 2, 3, 5}`. Here are some
# values from its CDF:
#
# $$CDF(0) = 0 $$
# $$CDF(1) = 0.2 $$
# $$CDF(2) = 0.6 $$
# $$CDF(3) = 0.8 $$
# $$CDF(4) = 0.8 $$
# $$CDF(5) = 1 $$
#
# We can evaluate the CDF for any value of \\(x\\), not just values that appear in
# the sample. If \\(x\\) is less than the smallest value in the sample, CDF(\\(x\\)) is
# 0. If \\(x\\) is greater than the largest value, CDF(\\(x\\)) is 1.
# <codecell>
import matplotlib.pyplot as plt
x = [0,1,2,3,4,5]
y = [0, 0.2, 0.6, 0.8, 0.8, 1]
plt.step(np.array(x), y, label='post')
plt.show()
# <markdowncell>
#
#
# Figure 3.2 is a graphical representation of this CDF. The CDF of a sample is a
# step function. In the next chapter we will see distributions whose CDFs are
# continuous functions.
#
# ## 3.5 Representing CDFs
#
# I have written a module called `Cdf` that provides a class named `Cdf` that
# represents CDFs. You can read the documentation of this module at
# `http://thinkstats.com/Cdf.html` and you can download it from
# `http://thinkstats.com/Cdf.py`.
#
# Cdfs are implemented with two sorted lists: `xs`, which contains the values,
# and `ps`, which contains the probabilities. The most important methods Cdfs
# provide are:
#
# **`Prob(x)`:**
# Given a value \\(x\\), computes the probability \\(p\\) = CDF(\\(x\\)).
# **`Value(p)`:**
# Given a probability \\(p\\), computes the corresponding value, \\(x\\); that is, the inverse CDF of \\(p\\).
#
# Because `xs` and `ps` are sorted, these operations can use the bisection
# algorithm, which is efficient. The run time is proportional to the logarithm
# of the number of values; see `http://wikipedia.org/wiki/Time_complexity`.
#
# Cdfs also provide `Render`, which returns two lists, `xs` and `ps`, suitable
# for plotting the CDF. Because the CDF is a step function, these lists have two
# elements for each unique value in the distribution.
#
# The Cdf module provides several functions for making Cdfs, including
# `MakeCdfFromList`, which takes a sequence of values and returns their Cdf.
#
# Finally, `myplot.py` provides functions named `Cdf` and `Cdfs` that plot Cdfs
# as lines.
#
# **Exercise 5** Download `Cdf.py` and `relay.py` (see Exercise 2) and generate a plot that shows the CDF of running speeds. Which gives you a better sense of the shape of the distribution, the PMF or the CDF? You can download a solution from `http://thinkstats.com/relay_cdf.py`.
#
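# A minimal stand-in for the `Cdf` class described above (an assumption, not
# the real `Cdf.py`, which has more features): sorted values plus cumulative
# probabilities, with `Prob` and `Value` implemented by bisection.
# <codecell>
import bisect

class SimpleCdf(object):
    def __init__(self, values):
        self.xs = sorted(values)
        n = float(len(self.xs))
        self.ps = [(i + 1) / n for i in range(len(self.xs))]

    def Prob(self, x):
        # fraction of sample values <= x
        index = bisect.bisect_right(self.xs, x)
        return self.ps[index - 1] if index else 0.0

    def Value(self, p):
        # inverse CDF: smallest x whose CDF is >= p
        index = bisect.bisect_left(self.ps, p)
        return self.xs[min(index, len(self.xs) - 1)]

cdf = SimpleCdf([1, 2, 2, 3, 5])
print cdf.Prob(2), cdf.Value(0.5)  # 0.6 2
# <markdowncell>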
# ## 3.6 Back to the survey data
#
# Figure 3.3 shows the CDFs of birth weight for first babies and others in the
# NSFG dataset.
#
# <codecell>
pregnancies.query('totalwgt_oz < 250') \
.groupby('first_born')['totalwgt_oz'] \
.hist(bins=100, normed=True, cumulative=True, histtype='step')
# <markdowncell>
# This figure makes the shape of the distributions, and the differences between
# them, much clearer. We can see that first babies are slightly lighter
# throughout the distribution, with a larger discrepancy above the mean.
#
# **Exercise 6** How much did you weigh at birth? If you don’t know, call your mother or someone else who knows. Using the pooled data (all live births), compute the distribution of birth weights and use it to find your percentile rank. If you were a first baby, find your percentile rank in the distribution for first babies. Otherwise use the distribution for others. If you are in the 90th percentile or higher, call your mother back and apologize.
#
# **Exercise 7** Suppose you and your classmates compute the percentile rank of your birth weights and then compute the CDF of the percentile ranks. What do you expect it to look like? Hint: what fraction of the class do you expect to be above the median?
#
# ## 3.7 Conditional distributions
#
# A **conditional distribution** is the distribution of a subset of the data
# which is selected according to a condition.
#
# For example, if you are above average in weight, but way above average in
# height, then you might be relatively light for your height. Here’s how you
# could make that claim more precise.
#
# 1. Select a cohort of people who are the same height as you (within some range).
# 2. Find the CDF of weight for those people.
# 3. Find the percentile rank of your weight in that distribution.
#
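# A generic pandas sketch of those three steps. The column names `height` and
# `weight` are hypothetical; the NSFG extract loaded above does not contain
# adult heights.
# <codecell>
def ConditionalPercentileRank(df, your_height, your_weight, tol=2.0):
    # 1. select the cohort with roughly your height
    cohort = df[(df['height'] >= your_height - tol) &
                (df['height'] <= your_height + tol)]
    # 2./3. percentile rank of your weight within that cohort
    below = (cohort['weight'] <= your_weight).sum()
    return 100.0 * below / len(cohort)
# <markdowncell>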
# Percentile ranks are useful for comparing measurements from different tests,
# or tests applied to different groups.
#
# For example, people who compete in foot races are usually grouped by age and
# gender. To compare people in different groups, you can convert race times to
# percentile ranks.
#
# **Exercise 8** I recently ran the James Joyce Ramble 10K in Dedham MA. The results are available from `http://coolrunning.com/results/10/ma/Apr25_27thAn_set1.shtml`. Go to that page and find my results. I came in 97th in a field of 1633, so what is my percentile rank in the field?
#
# In my division (M4049 means “male between 40 and 49 years of age”) I came in
# 26th out of 256. What is my percentile rank in my division?
#
# If I am still running in 10 years (and I hope I am), I will be in the M5059
# division. Assuming that my percentile rank in my division is the same, how
# much slower should I expect to be?
#
# I maintain a friendly rivalry with a student who is in the F2039 division.
# How fast does she have to run her next 10K to “beat” me in terms of percentile
# ranks?
# <markdowncell>
#
# ## 3.8 Random numbers
#
# CDFs are useful for generating random numbers with a given distribution.
# Here’s how:
#
# * Choose a random probability in the range 0–1.
# * Use `Cdf.Value` to find the value in the distribution that corresponds to the probability you chose.
#
# It might not be obvious why this works, but since it is easier to implement
# than to explain, let’s try it out.
#
# **Exercise 9** Write a function called `Sample`, that takes a Cdf and an integer, _n_, and returns a list of _n_ values chosen at random from the Cdf. Hint: use `random.random`. You will find a solution to this exercise in `Cdf.py`.
#
# Using the distribution of birth weights from the NSFG, generate a random
# sample with 1000 elements. Compute the CDF of the sample. Make a plot that
# shows the original CDF and the CDF of the random sample. For large values of
# \\(n\\), the distributions should be the same.
#
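# A sketch of `Sample` for Exercise 9, written against the `SimpleCdf`
# stand-in from the earlier cell (the `Cdf.py` version works the same way):
# <codecell>
import random

def Sample(cdf, n):
    # draw n random percentile ranks and map them through the inverse CDF
    return [cdf.Value(random.random()) for _ in range(n)]

print Sample(SimpleCdf([1, 2, 2, 3, 5]), 10)
# <markdowncell>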
# This process, generating a random sample based on a measured sample, is called
# **resampling**.
#
# There are two ways to draw a sample from a population: with and without
# replacement. If you imagine drawing marbles from an [urn](http://wikipedia.org/wiki/Urn_problem), "replacement" means
# putting the marbles back as you go (and stirring), so the population is the
# same for every draw. "Without replacement," means that each marble can only be
# drawn once, so the remaining population is different after each draw.
#
# In Python, sampling with replacement can be implemented with `random.random`
# to choose a percentile rank, or `random.choice` to choose an element from a
# sequence. Sampling without replacement is provided by `random.sample`.
#
# **Exercise 10** The numbers generated by `random.random` are supposed to be uniform between 0 and 1; that is, every value in the range should have the same probability.
#
# Generate 1000 numbers from `random.random` and plot their PMF and CDF. Can
# you tell whether they are uniform?
#
# You can read about the uniform distribution [here](http://wikipedia.org/wiki/Uniform_distribution_(discrete)).
#
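# A short check for Exercise 10, using pandas' `hist` for convenience (an
# assumption; the book uses its own Pmf/Cdf modules):
# <codecell>
u = pd.Series(np.random.random(1000))
u.hist(bins=50, normed=True)  # PMF-like view: should look roughly flat
# <codecell>
u.hist(bins=50, normed=True, cumulative=True,
       histtype='step')       # empirical CDF: should be close to a straight line
# <markdowncell>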
# ## 3.9 Summary statistics revisited
#
# Once you have computed a CDF, it is easy to compute other summary statistics.
# The median is just the 50th percentile. The 25th and 75th percentiles are
# often used to check whether a distribution is symmetric, and their difference,
# which is called the **interquartile range**, measures the spread.
#
# **Exercise 11** Write a function called `Median` that takes a Cdf and computes the median, and one called `Interquartile` that computes the interquartile range.
#
# Compute the 25th, 50th, and 75th percentiles of the birth weight CDF. Do
# these values suggest that the distribution is symmetric?
#
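# A sketch for Exercise 11, again using the `SimpleCdf` stand-in defined
# earlier (the birth-weight filter `totalwgt_oz < 250` matches the plots
# above; the exact percentile values are left for the reader to inspect):
# <codecell>
def Median(cdf):
    return cdf.Value(0.5)

def Interquartile(cdf):
    # spread between the 75th and 25th percentiles
    return cdf.Value(0.75) - cdf.Value(0.25)

wgt_cdf = SimpleCdf(pregnancies.query('totalwgt_oz < 250').totalwgt_oz)
print [wgt_cdf.Value(p) for p in (0.25, 0.5, 0.75)]
print "IQR: %.1f oz" % Interquartile(wgt_cdf)
# <markdowncell>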
# ## 3.10 Glossary
#
# **percentile rank:**
# The percentage of values in a distribution that are less than or equal to a given value.
# **CDF:**
# Cumulative distribution function, a function that maps from values to their percentile ranks.
# **percentile:**
# The value associated with a given percentile rank.
# **conditional distribution:**
# A distribution computed under the assumption that some condition holds.
# **resampling:**
# The process of generating a random sample from a distribution that was computed from a sample.
# **replacement:**
# During a sampling process, “replacement” indicates that the population is the same for every sample. “Without replacement” indicates that each element can be selected only once.
# **interquartile range:**
# A measure of spread, the difference between the 75th and 25th percentiles.
#
#
#
| mit |
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contour.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self._inside = self._slice_dict[extend]
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
self.solids = None
self.lines = None
self.dividers = None
self.set_label('')
if cbook.iterable(ticks):
self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
else:
self.locator = ticks # Handle default in _ticker()
if format is None:
if isinstance(self.norm, colors.LogNorm):
self.formatter = ticker.LogFormatter()
else:
self.formatter = ticker.ScalarFormatter()
elif cbook.is_string_like(format):
self.formatter = ticker.FormatStrFormatter(format)
else:
self.formatter = format # Assume it is a Formatter
# The rest is in a method so we can recalculate when clim changes.
self.draw_all()
def draw_all(self):
'''
Calculate any free parameters based on the current cmap and norm,
and do all the drawing.
'''
self._process_values()
self._find_range()
X, Y = self._mesh()
C = self._values[:,np.newaxis]
self._config_axes(X, Y)
if self.filled:
self._add_solids(X, Y, C)
self._set_label()
def _config_axes(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = patches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_yticks([])
ax.xaxis.set_label_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
def set_label(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha}
# Save, set, and restore hold state to keep pcolor from
# clearing the axes. Ordinarily this will not be needed,
# since the axes object should already have hold set.
_hold = self.ax.ishold()
self.ax.hold(True)
col = self.ax.pcolor(*args, **kw)
self.ax.hold(_hold)
#self.add_observer(col) # We should observe, not be observed...
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
)
self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
if len(y) <> N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _ticker(self):
'''
Return two sequences: ticks (colorbar data locations)
and ticklabels (strings).
'''
locator = self.locator
formatter = self.formatter
if locator is None:
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator()
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b, nbins=10)
if isinstance(self.norm, colors.NoNorm):
intv = self._values[0], self._values[-1]
else:
intv = self.vmin, self.vmax
locator.create_dummy_axis()
formatter.create_dummy_axis()
locator.set_view_interval(*intv)
locator.set_data_interval(*intv)
formatter.set_view_interval(*intv)
formatter.set_data_interval(*intv)
b = np.array(locator())
b, ticks = self._locate(b)
formatter.set_locs(b)
ticklabels = [formatter(t, i) for i, t in enumerate(b)]
offset_string = formatter.get_offset()
return ticks, ticklabels, offset_string
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
if self.extend in ('both', 'min'):
v[0] = -1
if self.extend in ('both', 'max'):
v[-1] = self.cmap.N
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = list(self.norm.boundaries)
if self.extend in ('both', 'min'):
b = [b[0]-1] + b
if self.extend in ('both', 'max'):
b = b + [b[-1] + 1]
b = np.array(b)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v[self._inside] = 0.5*(bi[:-1] + bi[1:])
if self.extend in ('both', 'min'):
v[0] = b[0] - 1
if self.extend in ('both', 'max'):
v[-1] = b[-1] + 1
self._boundaries = b
self._values = v
return
else:
if not self.norm.scaled():
self.norm.vmin = 0
self.norm.vmax = 1
b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
if self.extend in ('both', 'min'):
b[0] = b[0] - 1
if self.extend in ('both', 'max'):
b[-1] = b[-1] + 1
self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus ends if required.
'''
if self.extend == 'neither':
y = np.linspace(0, 1, N)
else:
if self.extend == 'both':
y = np.zeros(N + 2, 'd')
y[0] = -0.05
y[-1] = 1.05
elif self.extend == 'min':
y = np.zeros(N + 1, 'd')
y[0] = -0.05
else:
y = np.zeros(N + 1, 'd')
y[-1] = 1.05
y[self._inside] = np.linspace(0, 1, N)
return y
def _proportional_y(self):
'''
Return colorbar data coordinates for the boundaries of
a proportional colorbar.
'''
if isinstance(self.norm, colors.BoundaryNorm):
b = self._boundaries[self._inside]
y = (self._boundaries - self._boundaries[0])
y = y / (self._boundaries[-1] - self._boundaries[0])
else:
y = self.norm(self._boundaries.copy())
if self.extend in ('both', 'min'):
y[0] = -0.05
if self.extend in ('both', 'max'):
y[-1] = 1.05
yi = y[self._inside]
norm = colors.Normalize(yi[0], yi[-1])
y[self._inside] = norm(yi)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([0.0, 1.0])
if self.spacing == 'uniform':
y = self._uniform_y(self._central_N())
else:
y = self._proportional_y()
self._y = y
X, Y = np.meshgrid(x,y)
if self.extend in ('min', 'both'):
X[0,:] = 0.5
if self.extend in ('max', 'both'):
X[-1,:] = 0.5
return X, Y
def _locate(self, x):
'''
Given a possible set of color data values, return the ones
within range, together with their corresponding colorbar
data coordinates.
'''
if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
b = self._boundaries
xn = x
xout = x
else:
# Do calculations using normalized coordinates so
# as to make the interpolation more accurate.
b = self.norm(self._boundaries, clip=False).filled()
# We do our own clipping so that we can allow a tiny
# bit of slop in the end point ticks to allow for
# floating point errors.
xn = self.norm(x, clip=False).filled()
in_cond = (xn > -0.001) & (xn < 1.001)
xn = np.compress(in_cond, xn)
xout = np.compress(in_cond, x)
# The rest is linear interpolation with clipping.
y = self._y
N = len(b)
ii = np.minimum(np.searchsorted(b, xn), N-1)
i0 = np.maximum(ii - 1, 0)
#db = b[ii] - b[i0]
db = np.take(b, ii) - np.take(b, i0)
db = np.where(i0==ii, 1.0, db)
#dy = y[ii] - y[i0]
dy = np.take(y, ii) - np.take(y, i0)
z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
return xout, z
def set_alpha(self, alpha):
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
'''
Manually change any contour line colors. This is called
when the image or contour plot to which this colorbar belongs
is changed.
'''
# We are using an ugly brute-force method: clearing and
# redrawing the whole thing. The problem is that if any
# properties have been changed by methods other than the
# colorbar methods, those changes will be lost.
self.ax.cla()
self.draw_all()
#if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
# self.ax.cla()
# self.draw_all()
if isinstance(self.mappable, contour.ContourSet):
CS = self.mappable
if not CS.filled:
self.add_lines(CS)
#if self.lines is not None:
# tcolors = [c[0] for c in CS.tcolors]
# self.lines.set_color(tcolors)
#Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
#Fixme: Some refactoring may be needed; we should not
# be recalculating everything if there was a simple alpha
# change.
def make_axes(parent, **kw):
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
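# A minimal usage sketch for make_axes (hedged example; assumes an existing
# figure with a single Axes named `ax`):
#
#     cax, kw = make_axes(ax, orientation='vertical', fraction=0.15)
#     # `ax` has been shrunk to make room; `cax` can now host a Colorbar.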
| agpl-3.0 |
kmike/scikit-learn | examples/svm/plot_svm_regression.py | 8 | 1431 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF
kernels.
"""
print(__doc__)
###############################################################################
# Generate sample data
import numpy as np
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
| bsd-3-clause |
gkc1000/pyscf | examples/mcscf/30-hf_scan/hf-scan-triplet.py | 2 | 1482 | #!/usr/bin/env python
import numpy
from pyscf import scf
from pyscf import gto
from pyscf import mcscf
'''
Scan HF molecule triplet state dissociation curve
'''
ehf = []
emc = []
def run(b, dm, mo):
mol = gto.Mole()
mol.verbose = 5
mol.output = 'out_hf-%2.1f' % b
mol.atom = [
["F", (0., 0., 0.)],
["H", (0., 0., b)],]
mol.spin = 2
mol.basis = {'F': 'cc-pvdz',
'H': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf.append(m.scf(dm))
mc = mcscf.CASSCF(m, 6, 6)
if mo is None:
mo = mcscf.sort_mo(mc, m.mo_coeff, [3,4,5,6,9,10])
else:
mo = mcscf.project_init_guess(mc, mo)
e1 = mc.mc1step(mo)[0]
emc.append(e1)
return m.make_rdm1(), mc.mo_coeff
dm = mo = None
for b in numpy.arange(0.7, 4.01, 0.1):
dm, mo = run(b, dm, mo)
for b in reversed(numpy.arange(0.7, 4.01, 0.1)):
dm, mo = run(b, dm, mo)
x = numpy.arange(0.7, 4.01, .1)
ehf1 = ehf[:len(x)]
ehf2 = ehf[len(x):]
emc1 = emc[:len(x)]
emc2 = emc[len(x):]
ehf2.reverse()
emc2.reverse()
with open('hf-scan-triplet.txt', 'w') as fout:
fout.write(' HF 4.0->0.7 CAS(6,6) HF 0.7->4.0 CAS(6,6) \n')
for i, xi in enumerate(x):
fout.write('%2.1f %12.8f %12.8f %12.8f %12.8f\n'
% (xi, ehf1[i], emc1[i], ehf2[i], emc2[i]))
import matplotlib.pyplot as plt
plt.plot(x, emc1, label='CAS(6,6),4.0->0.7')
plt.plot(x, emc2, label='CAS(6,6),0.7->4.0')
plt.legend()
plt.show()
| apache-2.0 |
jgrizou/explauto | explauto/sensorimotor_model/inverse/cma.py | 2 | 377570 | #!/usr/bin/env python
"""Module cma implements the CMA-ES (Covariance Matrix Adaptation
Evolution Strategy).
CMA-ES is a stochastic optimizer for robust non-linear non-convex
derivative- and function-value-free numerical optimization.
This implementation can be used with Python versions >= 2.6, namely
2.6, 2.7, 3.3, 3.4.
CMA-ES searches for a minimizer (a solution x in :math:`R^n`) of an
objective function f (cost function), such that f(x) is minimal.
Regarding f, only a passably reliable ranking of the candidate
solutions in each iteration is necessary. Neither the function values
itself, nor the gradient of f need to be available or do matter (like
in the downhill simplex Nelder-Mead algorithm). Some termination
criteria however depend on actual f-values.
Two interfaces are provided:
- function `fmin(func, x0, sigma0,...)`
runs a complete minimization
of the objective function func with CMA-ES.
- class `CMAEvolutionStrategy`
allows for minimization such that the control of the iteration
loop remains with the user.
Used packages:
- unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not
available),
- avoidable with small changes: `time`, `sys`
- optional: `matplotlib.pyplot` (for `plot` etc., highly
recommended), `pprint` (pretty print), `pickle` (in class
`Sections`), `doctest`, `inspect`, `pygsl` (never by default)
Install
-------
The file ``cma.py`` only needs to be visible in the python path (e.g. in
the current working directory).
The preferred way of (system-wide) installation is calling
pip install cma
from the command line.
The ``cma.py`` file can also be installed from the
system shell terminal command line by::
python cma.py --install
which solely calls the ``setup`` function from the standard
``distutils.core`` package for installation. If the ``setup.py``
file is been provided with ``cma.py``, the standard call is
python setup.py cma
Both calls need to see ``cma.py`` in the current working directory and
might need to be preceded with ``sudo``.
To upgrade the currently installed version from the Python Package Index,
and also for first time installation, type in the system shell::
pip install --upgrade cma
Testing
-------
From the system shell::
python cma.py --test
or from the Python shell ``ipython``::
run cma.py --test
or from any python shell
import cma
cma.main('--test')
runs ``doctest.testmod(cma)`` showing only exceptions (and not the
tests that fail due to small differences in the output) and should
run without complaints in roughly 20 to 100 seconds.
Example
-------
From a python shell::
import cma
help(cma) # "this" help message, use cma? in ipython
help(cma.fmin)
help(cma.CMAEvolutionStrategy)
help(cma.CMAOptions)
cma.CMAOptions('tol') # display 'tolerance' termination options
cma.CMAOptions('verb') # display verbosity options
res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1)
res[0] # best evaluated solution
res[5] # mean solution, presumably better with noise
:See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy`
:Author: Nikolaus Hansen, 2008-2015
:Contributor: Petr Baudis, 2014
:License: BSD 3-Clause, see below.
"""
# The BSD 3-Clause License
# Copyright (c) 2014 Inria
# Author: Nikolaus Hansen, 2008-2015
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright and
# authors notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# and authors notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors nor the authors names may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# (note to self) for testing:
# pyflakes cma.py # finds bugs by static analysis
# pychecker --limit 60 cma.py # also executes, all 60 warnings checked
# or python ~/Downloads/pychecker-0.8.19/pychecker/checker.py cma.py
# python cma.py -t -quiet # executes implemented tests based on doctest
# python -3 cma.py --test 2> out2to3warnings.txt #
# to create a html documentation file:
# pydoc -w cma # edit the header (remove local pointers)
# epydoc cma.py # comes close to javadoc but does not find the
# # links of function references etc
# doxygen needs @package cma as first line in the module docstring
# some things like class attributes are not interpreted correctly
# sphinx: doc style of doc.python.org, could not make it work (yet)
# TODO: implement a (deep enough) copy-constructor for class
# CMAEvolutionStrategy to repeat the same step in different
# configurations for online-adaptation of meta parameters
# TODO: reconsider geno-pheno transformation. Can it be a completely
# separate module that operates in between optimizer and objective?
# Can we still propagate a repair of solutions to the optimizer?
# How about gradients (should be fine)?
# TODO: implement bipop in a separate algorithm as meta portfolio
# algorithm of IPOP and a local restart option to be implemented
# in fmin (e.g. option restart_mode in [IPOP, local])
# TODO: self.opts['mindx'] is checked without sigma_vec, which is wrong,
# TODO: project sigma_vec on the smallest eigenvector?
# TODO: class _CMAStopDict implementation looks way too complicated
# TODO: separate display and logging options, those CMAEvolutionStrategy
# instances don't use themselves (probably all?)
# TODO: disp method is implemented in CMAEvolutionStrategy and in
# CMADataLogger separately, OOOptimizer.disp_str should return a str
# which can be used uniformly? Only logger can disp a history.
# TODO: check scitools.easyviz and how big the adaptation would be
# TODO: split tell into a variable transformation part and the "pure"
# functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: copy_always optional parameter does not make much sense,
# as one can always copy the input argument first,
# however some calls are simpler
# TODO: generalize input logger in optimize() as after_iteration_handler
# (which is logger.add by default)? One difficulty is that
# the logger object is returned (not anymore when return of optimize
# is change). Another difficulty is the obscure usage of modulo
# for writing a final data line in optimize.
# TODO: separate initialize==reset_state from __init__
# TODO: introduce Ypos == diffC which makes the code more consistent and
# the active update "exact"?
# TODO: dynamically read "signals" from a file, see import ConfigParser
# or myproperties.py (to be called after tell())
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun,
# callback=None
# maxfev, diag (A sequence of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
# 'info' -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag},
# <allvecs>)
#
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: extend function unitdoctest, or use unittest?
# TODO: apply style guide
# TODO: eigh(): thorough testing would not hurt
# changes:
# 15/01/20: larger condition numbers for C realized by using tf_pheno
# of GenoPheno attribute gp.
# 15/01/19: injection method, first implementation, short injections
# and long injections with good fitness need to be addressed yet
# 15/01/xx: prepare_injection_directions to simplify/centralize injected
# solutions from mirroring and TPA
# 14/12/26: bug fix in correlation_matrix computation if np.diag is a view
# 14/12/06: meta_parameters now only as annotations in ## comments
# 14/12/03: unified use of base class constructor call, now always
# super(ThisClass, self).__init__(args_for_base_class_constructor)
# 14/11/29: termination via "stop now" in file cmaes_signals.par
# 14/11/28: bug fix initialization of C took place before setting the
# seed. Now in some dimensions (e.g. 10) results are (still) not
# deterministic due to np.linalg.eigh, in some dimensions (<9, 12)
# they seem to be deterministic.
# 14/11/23: bipop option integration, contributed by Petr Baudis
# 14/09/30: initial_elitism option added to fmin
# 14/08/1x: developing fitness wrappers in FFWrappers class
# 14/08/xx: return value of OOOptimizer.optimize changed to self.
# CMAOptions now need to uniquely match an *initial substring*
# only (via method corrected_key).
# Bug fix in CMAEvolutionStrategy.stop: termination conditions
# are now recomputed iff check and self.countiter > 0.
# Doc corrected that self.gp.geno _is_ applied to x0
# Vast reorganization/modularization/improvements of plotting
# 14/08/01: bug fix to guaranty pos. def. in active CMA
# 14/06/04: gradient of f can now be used with fmin and/or ask
# 14/05/11: global rcParams['font.size'] not permanently changed anymore,
# a little nicer annotations for the plots
# 14/05/07: added method result_pretty to pretty print optimization result
# 14/05/06: associated show() everywhere with ion() which should solve the
# blocked terminal problem
# 14/05/05: all instances of "unicode" removed (was incompatible to 3.x)
# 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the
# comments before the code starts
# 14/05/xx: change the order of kwargs of OOOptimizer.optimize,
# remove prepare method in AdaptSigma classes, various changes/cleaning
# 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly
# bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1])
# bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied
# 14/02/27: bug fixed when BoundPenalty was combined with fixed variables.
# 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase,
# to make testing different adaptation rules (much) easier
# 12/12/14: separated CMAOptions and arguments to fmin
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
# timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
# sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
# is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is depreciated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotype (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactoring
# 10/03/09: upper bound exp(min(1,...)) for step-size control
from __future__ import division
# future is >= 3.0, this code has mainly been used with 2.6 & 2.7
from __future__ import with_statement
# only necessary for python 2.5 and not in heavy use
from __future__ import print_function
# available from python 2.6, code should also work without
from __future__ import absolute_import
from __future__ import unicode_literals
# from __future__ import collections.MutableMapping
# does not exist in future, otherwise Python 2.5 would work, since 0.91.01
import sys
if not sys.version.startswith('2'): # in python 3
xrange = range
raw_input = input
basestring = str
else:
input = raw_input # in py2, input(x) == eval(raw_input(x))
import time # not really essential
import collections
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import inf, array, dot, exp, log, sqrt, sum, isscalar, isfinite
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed build-in
try:
from matplotlib import pyplot
savefig = pyplot.savefig # now we can use cma.savefig() etc
closefig = pyplot.close
def show():
# is_interactive = matplotlib.is_interactive()
pyplot.ion()
pyplot.show()
# if we call now matplotlib.interactive(True), the console is
# blocked
pyplot.ion() # prevents that execution stops after plotting
except:
pyplot = None
savefig = None
closefig = None
def show():
print('pyplot.show() is not available')
print('Could not import matplotlib.pyplot, therefore ``cma.plot()``'
' etc. is not available')
__author__ = 'Nikolaus Hansen'
__version__ = "1.1.06 $Revision: 4129 $ $Date: 2015-01-23 20:13:51 +0100 (Fri, 23 Jan 2015) $"
# $Source$ # according to PEP 8 style guides, but what is it good for?
# $Id: cma.py 4129 2015-01-23 19:13:51Z hansen $
# bash $: svn propset svn:keywords 'Date Revision Id' cma.py
__docformat__ = "reStructuredText" # this hides some comments entirely?
__all__ = (
'main',
'fmin',
'fcts',
'Fcts',
'felli',
'rotate',
'pprint',
'plot',
'disp',
'show',
'savefig',
'closefig',
'use_archives',
'is_feasible',
'unitdoctest',
'DerivedDictBase',
'SolutionDict',
'CMASolutionDict',
'BestSolution',
# 'BoundaryHandlerBase',
'BoundNone',
'BoundTransform',
'BoundPenalty',
# 'BoxConstraintsTransformationBase',
# 'BoxConstraintsLinQuadTransformation',
'GenoPheno',
'OOOptimizer',
'CMAEvolutionStrategy',
'CMAOptions',
'CMASolutionDict',
'CMAAdaptSigmaBase',
'CMAAdaptSigmaNone',
'CMAAdaptSigmaDistanceProportional',
'CMAAdaptSigmaCSA',
'CMAAdaptSigmaTPA',
'CMAAdaptSigmaMedianImprovement',
'BaseDataLogger',
'CMADataLogger',
'NoiseHandler',
'Sections',
'Misc',
'Mh',
'ElapsedTime',
'Rotation',
'fcts',
'FFWrappers',
)
use_archives = True # on False some unit tests fail
"""speed up for very large population size. `use_archives` prevents the
need for an inverse gp-transformation, relies on collections module,
not sure what happens if set to ``False``. """
class MetaParameters(object):
"""meta parameters are either "modifiable constants" or refer to
options from ``CMAOptions`` or are arguments to ``fmin`` or to the
``NoiseHandler`` class constructor.
Details
-------
This code contains a single class instance `meta_parameters`
Some interfaces rely on parameters being either `int` or
`float` only. More sophisticated choices are implemented via
``choice_value = {1: 'this', 2: 'or that'}[int_param_value]`` here.
CAVEAT
------
``meta_parameters`` should not be used to determine default
arguments, because these are assigned only once and for all during
module import.
"""
def __init__(self):
self.sigma0 = None ## [~0.01, ~10] # no default available
# learning rates and back-ward time horizons
self.CMA_cmean = 1.0 ## [~0.1, ~10] #
self.c1_multiplier = 1.0 ## [~1e-4, ~20] l
self.cmu_multiplier = 2.0 ## [~1e-4, ~30] l # zero means off
self.CMA_active = 1.0 ## [~1e-4, ~10] l # 0 means off, was CMA_activefac
self.cc_multiplier = 1.0 ## [~0.01, ~20] l
self.cs_multiplier = 1.0 ## [~0.01, ~10] l # learning rate for cs
self.CSA_dampfac = 1.0 ## [~0.01, ~10]
self.CMA_dampsvec_fac = None ## [~0.01, ~100] # def=np.Inf or 0.5, not clear whether this is a log parameter
self.CMA_dampsvec_fade = 0.1 ## [0, ~2]
# exponents for learning rates
self.c1_exponent = 2.0 ## [~1.25, 2]
self.cmu_exponent = 2.0 ## [~1.25, 2]
self.cact_exponent = 1.5 ## [~1.25, 2]
self.cc_exponent = 1.0 ## [~0.25, ~1.25]
self.cs_exponent = 1.0 ## [~0.25, ~1.75] # upper bound depends on CSA_clip_length_value
# selection related parameters
self.lambda_exponent = 0.0 ## [0, ~2.5] # usually <= 2, used by adding N**lambda_exponent to popsize-1
self.parent_fraction = 0.5 ## [0, 1] # default is weighted recombination
self.CMA_elitist = 0 ## [0, 2] i # a choice variable
self.CMA_mirrors = 0.0 ## [0, 0.5) # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
# sampling strategies
self.CMA_sample_on_sphere_surface = 0 ## [0, 1] i # boolean
self.mean_shift_line_samples = 0 ## [0, 1] i # boolean
self.pc_line_samples = 0 ## [0, 1] i # boolean
# step-size adapation related parameters
self.CSA_damp_mueff_exponent = 0.5 ## [~0.25, ~1.5] # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
self.CSA_disregard_length = 0 ## [0, 1] i
self.CSA_squared = 0 ## [0, 1] i
self.CSA_clip_length_value = None ## [0, ~20] # None reflects inf
# noise handling
self.noise_reeval_multiplier = 1.0 ## [0.2, 4] # usually 2 offspring are reevaluated
self.noise_choose_reeval = 1 ## [1, 3] i # which ones to reevaluate
self.noise_theta = 0.5 ## [~0.05, ~0.9]
self.noise_alphasigma = 2.0 ## [0, 10]
self.noise_alphaevals = 2.0 ## [0, 10]
self.noise_alphaevalsdown_exponent = -0.25 ## [-1.5, 0]
self.noise_aggregate = None ## [1, 2] i # None and 0 == default or user option choice, 1 == median, 2 == mean
# TODO: more noise handling options (maxreevals...)
# restarts
self.restarts = 0 ## [0, ~30] # but depends on popsize inc
self.restart_from_best = 0 ## [0, 1] i # bool
self.incpopsize = 2.0 ## [~1, ~5]
# termination conditions (for restarts)
self.maxiter_multiplier = 1.0 ## [~0.01, ~100] l
self.mindx = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any direction, cave interference with tol*',
self.minstd = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any coordinate direction, cave interference with tol*',
self.maxstd = None ## [~1, ~1e9] l #v maximal std in any coordinate direction, default is inf',
self.tolfacupx = 1e3 ## [~10, ~1e9] l #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
self.tolupsigma = 1e20 ## [~100, ~1e99] l #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements',
self.tolx = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in x-changes',
self.tolfun = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value, quite useful',
self.tolfunhist = 1e-12 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value history',
self.tolstagnation_multiplier = 1.0 ## [0.01, ~100] # ': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
# abandoned:
# self.noise_change_sigma_exponent = 1.0 ## [0, 2]
# self.noise_epsilon = 1e-7 ## [0, ~1e-2] l #
# self.maxfevals = None ## [1, ~1e11] l # is not a performance parameter
# self.cc_mu_multiplier = 1 ## [0, ~10] # AKA alpha_cc
# self.lambda_log_multiplier = 3 ## [0, ~10]
# self.lambda_multiplier = 0 ## (0, ~10]
meta_parameters = MetaParameters()
# emptysets = ('', (), [], {})
# array([]) does not work but np.size(.) == 0
# here is the problem:
# bool(array([0])) is False
# bool(list(array([0]))) is True
# bool(list(array([0, 1]))) is True
# bool(array([0, 1])) raises ValueError
#
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False,
# and False for NaN, and an exception for array([0,1]), see also
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#True/False_evaluations
# ____________________________________________________________
# ____________________________________________________________
#
def rglen(ar):
"""shortcut for the iterator ``xrange(len(ar))``"""
return xrange(len(ar))
def is_feasible(x, f):
"""default to check feasibility, see also ``cma_default_options``"""
return f is not None and f is not np.NaN
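# For illustration with the default above: is_feasible([1., 2.], 3.5) is True,
# while is_feasible([1., 2.], None) and is_feasible([1., 2.], np.NaN) are False.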
global_verbosity = 1
def _print_warning(msg, method_name=None, class_name=None, iteration=None,
verbose=None):
if verbose is None:
verbose = global_verbosity
if verbose > 0:
print('WARNING (module=' + __name__ +
(', class=' + str(class_name) if class_name else '') +
(', method=' + str(method_name) if method_name else '') +
(', iteration=' + str(iteration) if iteration else '') +
'): ', msg)
# ____________________________________________________________
# ____________________________________________________________
#
def unitdoctest():
"""is used to describe test cases and might in future become helpful
as an experimental tutorial as well. The main testing feature at the
moment is by doctest with ``cma._test()`` or conveniently by
``python cma.py --test``. With the ``--verbose`` option added, the
results will always slightly differ and many "failed" test cases
might be reported.
A simple first overall test:
>>> import cma
>>> res = cma.fmin(cma.fcts.elli, 3*[1], 1,
... {'CMA_diagonal':2, 'seed':1, 'verb_time':0})
(3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1)
Covariance matrix is diagonal for 2 iterations (1/ccov=7.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.453161670768570e+04 1.2e+00 1.08e+00 1e+00 1e+00
2 14 3.281197961927601e+04 1.3e+00 1.22e+00 1e+00 2e+00
3 21 1.082851071704020e+04 1.3e+00 1.24e+00 1e+00 2e+00
100 700 8.544042012075362e+00 1.4e+02 3.18e-01 1e-03 2e-01
200 1400 5.691152415221861e-12 1.0e+03 3.82e-05 1e-09 1e-06
220 1540 3.890107746209078e-15 9.5e+02 4.56e-06 8e-11 7e-08
termination on tolfun : 1e-11
final/bestever f-value = 3.89010774621e-15 2.52273602735e-15
mean solution: [ -4.63614606e-08 -3.42761465e-10 1.59957987e-11]
std deviation: [ 6.96066282e-08 2.28704425e-09 7.63875911e-11]
Test on the Rosenbrock function with 3 restarts. The first trial only
finds the local optimum, which happens in about 20% of the cases.
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, 4*[-1], 1,
... options={'ftarget':1e-6, 'verb_time':0,
... 'verb_disp':500, 'seed':3},
... restarts=3)
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 4.875315645656848e+01 1.0e+00 8.43e-01 8e-01 8e-01
2 16 1.662319948123120e+02 1.1e+00 7.67e-01 7e-01 8e-01
3 24 6.747063604799602e+01 1.2e+00 7.08e-01 6e-01 7e-01
184 1472 3.701428610430019e+00 4.3e+01 9.41e-07 3e-08 5e-08
termination on tolfun : 1e-11
final/bestever f-value = 3.70142861043 3.70142861043
mean solution: [-0.77565922 0.61309336 0.38206284 0.14597202]
std deviation: [ 2.54211502e-08 3.88803698e-08 4.74481641e-08 3.64398108e-08]
(8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 1489 2.011376859371495e+02 1.0e+00 8.90e-01 8e-01 9e-01
2 1505 4.157106647905128e+01 1.1e+00 8.02e-01 7e-01 7e-01
3 1521 3.548184889359060e+01 1.1e+00 1.02e+00 8e-01 1e+00
111 3249 6.831867555502181e-07 5.1e+01 2.62e-02 2e-04 2e-03
termination on ftarget : 1e-06
final/bestever f-value = 6.8318675555e-07 1.18576673231e-07
mean solution: [ 0.99997004 0.99993938 0.99984868 0.99969505]
std deviation: [ 0.00018973 0.00038006 0.00076479 0.00151402]
>>> assert res[1] <= 1e-6
Notice the different termination conditions. Termination on the target
function value ftarget prevents further restarts.
Test of scaling_of_variables option
>>> import cma
>>> opts = cma.CMAOptions()
>>> opts['seed'] = 456
>>> opts['verb_disp'] = 0
>>> opts['CMA_active'] = 1
>>> # rescaling of third variable: for searching in roughly
>>> # x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0)
>>> opts['scaling_of_variables'] = [1, 1, 1e3, 1]
>>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, opts)
termination on tolfun : 1e-11
final/bestever f-value = 2.68096173031e-14 1.09714829146e-14
mean solution: [ 1.00000001 1.00000002 1.00000004 1.00000007]
std deviation: [ 3.00466854e-08 5.88400826e-08 1.18482371e-07 2.34837383e-07]
The printed std deviations reflect the actual value in the parameters
of the function (not the one in the internal representation which
can be different).
Test of CMA_stds scaling option.
>>> import cma
>>> opts = cma.CMAOptions()
>>> s = 5 * [1]
>>> s[0] = 1e3
>>> opts.set('CMA_stds', s)
>>> opts.set('verb_disp', 0)
>>> res = cma.fmin(cma.fcts.cigar, 5 * [0.1], 0.1, opts)
>>> assert res[1] < 1800
:See: cma.main(), cma._test()
"""
pass
class _BlancClass(object):
"""blanc container class for having a collection of attributes,
that might/should at some point become a more tailored class"""
if use_archives:
class DerivedDictBase(collections.MutableMapping):
"""for conveniently adding "features" to a dictionary. The actual
dictionary is in ``self.data``. Copy-paste
and modify setitem, getitem, and delitem, if necessary.
Details: This is the clean way to subclass the built-in dict.
"""
def __init__(self, *args, **kwargs):
# collections.MutableMapping.__init__(self)
super(DerivedDictBase, self).__init__()
# super(SolutionDict, self).__init__() # the same
self.data = dict()
self.data.update(dict(*args, **kwargs))
def __len__(self):
return len(self.data)
def __contains__(self, key):
return key in self.data
def __iter__(self):
return iter(self.data)
def __setitem__(self, key, value):
"""defines self[key] = value"""
self.data[key] = value
def __getitem__(self, key):
"""defines self[key]"""
return self.data[key]
def __delitem__(self, key):
del self.data[key]
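# A minimal subclass sketch illustrating the "copy-paste and modify" advice
# from the docstring above (hypothetical class, not used elsewhere):
#
#     class VerboseDict(DerivedDictBase):
#         def __setitem__(self, key, value):
#             print('setting ' + str(key))
#             self.data[key] = value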
class SolutionDict(DerivedDictBase):
"""dictionary with computation of an hash key.
The hash key is generated from the inserted solution and a stack of
previously inserted solutions with the same key is provided. Each entry is meant
to store additional information related to the solution.
>>> import cma, numpy as np
>>> d = cma.SolutionDict()
>>> x = np.array([1,2,4])
>>> d[x] = {'f': sum(x**2), 'iteration': 1}
>>> assert d[x]['iteration'] == 1
>>> assert d.get(x) == (d[x] if d.key(x) in d.keys() else None)
TODO: data_with_same_key behaves like a stack (see setitem and
delitem), but rather should behave like a queue?! A queue is less
consistent with the operation self[key] = ..., if
self.data_with_same_key[key] is not empty.
TODO: iteration key is used to clean up without error management
"""
def __init__(self, *args, **kwargs):
# DerivedDictBase.__init__(self, *args, **kwargs)
super(SolutionDict, self).__init__(*args, **kwargs)
self.data_with_same_key = {}
self.last_iteration = 0
def key(self, x):
try:
return tuple(x)
# using sum(x) is slower, using x[0] is slightly faster
except TypeError:
return x
def __setitem__(self, key, value):
"""defines self[key] = value"""
key = self.key(key)
if key in self.data_with_same_key:
self.data_with_same_key[key] += [self.data[key]]
elif key in self.data:
self.data_with_same_key[key] = [self.data[key]]
self.data[key] = value
def __getitem__(self, key): # 50% of time of
"""defines self[key]"""
return self.data[self.key(key)]
def __delitem__(self, key):
"""remove only most current key-entry"""
key = self.key(key)
if key in self.data_with_same_key:
if len(self.data_with_same_key[key]) == 1:
self.data[key] = self.data_with_same_key.pop(key)[0]
else:
self.data[key] = self.data_with_same_key[key].pop(-1)
else:
del self.data[key]
def truncate(self, max_len, min_iter):
if len(self) > max_len:
for k in list(self.keys()):
if self[k]['iteration'] < min_iter:
del self[k]
# deletes one item with k as key, better delete all?
class CMASolutionDict(SolutionDict):
def __init__(self, *args, **kwargs):
# SolutionDict.__init__(self, *args, **kwargs)
super(CMASolutionDict, self).__init__(*args, **kwargs)
self.last_solution_index = 0
# TODO: insert takes 30% of the overall CPU time, mostly in def key()
# with about 15% of the overall CPU time
def insert(self, key, geno=None, iteration=None, fitness=None, value=None):
"""insert an entry with key ``key`` and value
``value if value is not None else {'pheno': key}`` and
``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs.
"""
# archive returned solutions, first clean up archive
if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1:
self.truncate(300, iteration - 3)
elif value is not None and value.get('iteration'):
iteration = value['iteration']
if (iteration % 10) < 1:
self.truncate(300, iteration - 3)
self.last_solution_index += 1
if value is not None:
try:
iteration = value['iteration']
except:
pass
if iteration is not None:
if iteration > self.last_iteration:
self.last_solution_index = 0
self.last_iteration = iteration
else:
iteration = self.last_iteration + 0.5 # a hack to get a somewhat reasonable value
if value is not None:
self[key] = value
else:
self[key] = {'pheno': key}
if geno is not None:
self[key]['geno'] = geno
if iteration is not None:
self[key]['iteration'] = iteration
if fitness is not None:
self[key]['fitness'] = fitness
return self[key]
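# A usage sketch with hypothetical values, following the insert semantics above:
#
#     archive = CMASolutionDict()
#     archive.insert((0.5, 1.5), geno=[0.4, 1.6], iteration=3, fitness=2.5)
#     archive[(0.5, 1.5)]['fitness']  # == 2.5
#     archive[(0.5, 1.5)]['geno']     # == [0.4, 1.6]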
if not use_archives:
class CMASolutionDict(dict):
"""a hack to get most code examples running"""
def insert(self, *args, **kwargs):
pass
def get(self, key):
return None
def __getitem__(self, key):
return None
def __setitem__(self, key, value):
pass
class BestSolution(object):
"""container to keep track of the best solution seen"""
def __init__(self, x=None, f=np.inf, evals=None):
"""initialize the best solution with `x`, `f`, and `evals`.
Better solutions have smaller `f`-values.
"""
self.x = x
self.x_geno = None
self.f = f if f is not None and f is not np.nan else np.inf
self.evals = evals
self.evalsall = evals
self.last = _BlancClass()
self.last.x = x
self.last.f = f
def update(self, arx, xarchive=None, arf=None, evals=None):
"""checks for better solutions in list `arx`.
Based on the smallest corresponding value in `arf`,
alternatively, `update` may be called with a `BestSolution`
instance like ``update(another_best_solution)`` in which case
the better solution becomes the current best.
`xarchive` is used to retrieve the genotype of a solution.
"""
if isinstance(arx, BestSolution):
if self.evalsall is None:
self.evalsall = arx.evalsall
elif arx.evalsall is not None:
self.evalsall = max((self.evalsall, arx.evalsall))
if arx.f is not None and arx.f < np.inf:
self.update([arx.x], xarchive, [arx.f], arx.evals)
return self
assert arf is not None
# find failsafe minimum
minidx = np.nanargmin(arf)
if minidx is np.nan:
return
minarf = arf[minidx]
# minarf = reduce(lambda x, y: y if y and y is not np.nan
# and y < x else x, arf, np.inf)
if minarf < np.inf and (minarf < self.f or self.f is None):
self.x, self.f = arx[minidx], arf[minidx]
if xarchive is not None and xarchive.get(self.x) is not None:
self.x_geno = xarchive[self.x].get('geno')
else:
self.x_geno = None
self.evals = None if not evals else evals - len(arf) + minidx + 1
self.evalsall = evals
elif evals:
self.evalsall = evals
self.last.x = arx[minidx]
self.last.f = minarf
def get(self):
"""return ``(x, f, evals)`` """
return self.x, self.f, self.evals # , self.x_geno
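# A usage sketch with hypothetical values:
#
#     best = BestSolution()
#     best.update([[0., 1.], [2., 3.]], arf=[4.0, 1.5], evals=2)
#     best.get()  # == ([2., 3.], 1.5, 2)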
# ____________________________________________________________
# ____________________________________________________________
#
class BoundaryHandlerBase(object):
"""hacked base class """
def __init__(self, bounds):
"""bounds are not copied, but possibly modified and
put into a normalized form: ``bounds`` can be ``None``
or ``[lb, ub]`` where ``lb`` and ``ub`` are
either None or a vector (which can have ``None`` entries).
Generally, the last entry is recycled to compute bounds
for any dimension.
"""
if not bounds:
self.bounds = None
else:
l = [None, None] # figure out lengths
for i in [0, 1]:
try:
l[i] = len(bounds[i])
except TypeError:
bounds[i] = [bounds[i]]
l[i] = 1
if all([bounds[i][j] is None or not isfinite(bounds[i][j])
for j in rglen(bounds[i])]):
bounds[i] = None
if bounds[i] is not None and any([bounds[i][j] == (-1)**i * np.inf
for j in rglen(bounds[i])]):
raise ValueError('lower/upper is +inf/-inf and ' +
'therefore no finite feasible solution is available')
self.bounds = bounds
def __call__(self, solutions, *args, **kwargs):
"""return penalty or list of penalties, by default zero(s).
This interface seems too specifically tailored to the derived
BoundPenalty class, it should maybe change.
"""
if isscalar(solutions[0]):
return 0.0
else:
return len(solutions) * [0.0]
def update(self, *args, **kwargs):
return self
def repair(self, x, copy_if_changed=True, copy_always=False):
"""projects infeasible values on the domain bound, might be
overwritten by derived class """
if copy_always:
x = array(x, copy=True)
copy = False
else:
copy = copy_if_changed
if self.bounds is None:
return x
for ib in [0, 1]:
if self.bounds[ib] is None:
continue
for i in rglen(x):
idx = min([i, len(self.bounds[ib]) - 1])
if self.bounds[ib][idx] is not None and \
(-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
if copy:
x = array(x, copy=True)
copy = False
x[i] = self.bounds[ib][idx]
return x
def inverse(self, y, copy_if_changed=True, copy_always=False):
return y if not copy_always else array(y, copy=True)
def get_bounds(self, which, dimension):
"""``get_bounds('lower', 8)`` returns the lower bounds in 8-D"""
if which == 'lower' or which == 0:
return self._get_bounds(0, dimension)
elif which == 'upper' or which == 1:
return self._get_bounds(1, dimension)
else:
raise ValueError("argument which must be 'lower' or 'upper'")
def _get_bounds(self, ib, dimension):
"""ib == 0/1 means lower/upper bound, return a vector of length
`dimension` """
sign_ = 2 * ib - 1
assert sign_**2 == 1
if self.bounds is None or self.bounds[ib] is None:
return array(dimension * [sign_ * np.Inf])
res = []
for i in xrange(dimension):
res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
if res[-1] is None:
res[-1] = sign_ * np.Inf
return array(res)
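# For illustration (scalar lower bound, vector upper bound, last entry recycled):
#
#     b = BoundaryHandlerBase([0, [2, 3]])
#     b.get_bounds('lower', 4)  # == array([0, 0, 0, 0])
#     b.get_bounds('upper', 4)  # == array([2, 3, 3, 3])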
def has_bounds(self):
"""return True, if any variable is bounded"""
bounds = self.bounds
if bounds in (None, [None, None]):
return False
for ib, bound in enumerate(bounds):
if bound is not None:
sign_ = 2 * ib - 1
for bound_i in bound:
if bound_i is not None and sign_ * bound_i < np.inf:
return True
return False
def is_in_bounds(self, x):
"""not yet tested"""
if self.bounds is None:
return True
for ib in [0, 1]:
if self.bounds[ib] is None:
continue
for i in rglen(x):
idx = min([i, len(self.bounds[ib]) - 1])
if self.bounds[ib][idx] is not None and \
(-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
return False
return True
def to_dim_times_two(self, bounds):
"""return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
as used by ``BoxConstraints...`` class.
"""
if not bounds:
b = [[None, None]]
else:
l = [None, None] # figure out lengths
for i in [0, 1]:
try:
l[i] = len(bounds[i])
except TypeError:
bounds[i] = [bounds[i]]
l[i] = 1
b = [] # bounds in different format
try:
for i in xrange(max(l)):
b.append([bounds[0][i] if i < l[0] else None,
bounds[1][i] if i < l[1] else None])
except (TypeError, IndexError):
print("boundaries must be provided in the form " +
"[scalar_of_vector, scalar_or_vector]")
raise
return b
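# For illustration: with lower bounds [-1, -2] and upper bounds [1, 2],
# to_dim_times_two([[-1, -2], [1, 2]]) returns [[-1, 1], [-2, 2]], i.e. the
# per-dimension [lb_i, ub_i] pairs used by the BoxConstraints... classes.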
# ____________________________________________________________
# ____________________________________________________________
#
class BoundNone(BoundaryHandlerBase):
def __init__(self, bounds=None):
if bounds is not None:
raise ValueError()
# BoundaryHandlerBase.__init__(self, None)
super(BoundNone, self).__init__(None)
def is_in_bounds(self, x):
return True
# ____________________________________________________________
# ____________________________________________________________
#
class BoundTransform(BoundaryHandlerBase):
"""Handles boundary by a smooth, piecewise linear and quadratic
transformation into the feasible domain.
>>> import cma
>>> veq = cma.Mh.vequals_approximately
>>> b = cma.BoundTransform([None, 1])
>>> assert b.bounds == [[None], [1]]
>>> assert veq(b.repair([0, 1, 1.2]), array([ 0., 0.975, 0.975]))
>>> assert b.is_in_bounds([0, 0.5, 1])
>>> assert veq(b.transform([0, 1, 2]), [ 0. , 0.975, 0.2 ])
>>> o=cma.fmin(cma.fcts.sphere, 6 * [-2], 0.5, options={
... 'boundary_handling': 'BoundTransform ',
... 'bounds': [[], 5 * [-1] + [inf]] })
>>> assert o[1] < 5 + 1e-8
>>> import numpy as np
>>> b = cma.BoundTransform([-np.random.rand(120), np.random.rand(120)])
>>> for i in range(100):
... x = (-i-1) * np.random.rand(120) + i * np.random.randn(120)
... x_to_b = b.repair(x)
... x2 = b.inverse(x_to_b)
... x2_to_b = b.repair(x2)
... x3 = b.inverse(x2_to_b)
... x3_to_b = b.repair(x3)
... assert veq(x_to_b, x2_to_b)
... assert veq(x2, x3)
... assert veq(x2_to_b, x3_to_b)
Details: this class uses ``class BoxConstraintsLinQuadTransformation``
"""
def __init__(self, bounds=None):
"""Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
are lower and upper domain boundaries, each is either `None` or
a scalar or a list or array of appropriate size.
"""
# BoundaryHandlerBase.__init__(self, bounds)
super(BoundTransform, self).__init__(bounds)
self.bounds_tf = BoxConstraintsLinQuadTransformation(self.to_dim_times_two(bounds))
def repair(self, x, copy_if_changed=True, copy_always=False):
"""transforms ``x`` into the bounded domain.
``copy_always`` option might disappear.
"""
copy = copy_if_changed
if copy_always:
x = array(x, copy=True)
copy = False
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return x
return self.bounds_tf(x, copy)
def transform(self, x):
return self.repair(x)
def inverse(self, x, copy_if_changed=True, copy_always=False):
"""inverse transform of ``x`` from the bounded domain.
"""
copy = copy_if_changed
if copy_always:
x = array(x, copy=True)
copy = False
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return x
return self.bounds_tf.inverse(x, copy) # this doesn't exist
# ____________________________________________________________
# ____________________________________________________________
#
class BoundPenalty(BoundaryHandlerBase):
"""Computes the boundary penalty. Must be updated each iteration,
using the `update` method.
Details
-------
The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
where `xfeas` is the closest feasible (in-bounds) solution from `x`.
The weight `w[i]` should be updated during each iteration using
the update method.
Example:
>>> import cma
>>> cma.fmin(cma.felli, 6 * [1], 1,
... {
... 'boundary_handling': 'BoundPenalty',
... 'bounds': [-1, 1],
... 'fixed_variables': {0: 0.012, 2:0.234}
... })
Reference: Hansen et al 2009, A Method for Handling Uncertainty...
IEEE TEC, with addendum, see
http://www.lri.fr/~hansen/TEC2009online.pdf
"""
def __init__(self, bounds=None):
"""Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
are lower and upper domain boundaries, each is either `None` or
a scalar or a list or array of appropriate size.
"""
# #
# bounds attribute reminds the domain boundary values
# BoundaryHandlerBase.__init__(self, bounds)
super(BoundPenalty, self).__init__(bounds)
self.gamma = 1 # a very crude assumption
self.weights_initialized = False # gamma becomes a vector after initialization
self.hist = [] # delta-f history
def repair(self, x, copy_if_changed=True, copy_always=False):
"""sets out-of-bounds components of ``x`` on the bounds.
"""
# TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
# remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
copy = copy_if_changed
if copy_always:
x = array(x, copy=True)
bounds = self.bounds
if bounds not in (None, [None, None], (None, None)): # solely for efficiency
x = array(x, copy=True) if copy and not copy_always else x
if bounds[0] is not None:
if isscalar(bounds[0]):
for i in rglen(x):
x[i] = max((bounds[0], x[i]))
else:
for i in rglen(x):
j = min([i, len(bounds[0]) - 1])
if bounds[0][j] is not None:
x[i] = max((bounds[0][j], x[i]))
if bounds[1] is not None:
if isscalar(bounds[1]):
for i in rglen(x):
x[i] = min((bounds[1], x[i]))
else:
for i in rglen(x):
j = min((i, len(bounds[1]) - 1))
if bounds[1][j] is not None:
x[i] = min((bounds[1][j], x[i]))
return x
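# For illustration, with scalar bounds [-1, 1] (recycled for all dimensions):
#
#     bp = BoundPenalty([-1, 1])
#     bp.repair([-2., 0.5, 3.])  # == array([-1., 0.5, 1.])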
# ____________________________________________________________
#
def __call__(self, x, archive, gp):
"""returns the boundary violation penalty for `x` ,where `x` is a
single solution or a list or array of solutions.
"""
if x in (None, (), []):
return x
if self.bounds in (None, [None, None], (None, None)):
return 0.0 if isscalar(x[0]) else [0.0] * len(x) # no penalty
x_is_single_vector = isscalar(x[0])
x = [x] if x_is_single_vector else x
# add fixed variables to self.gamma
try:
gamma = list(self.gamma) # fails if self.gamma is a scalar
for i in sorted(gp.fixed_values): # fails if fixed_values is None
gamma.insert(i, 0.0)
gamma = array(gamma, copy=False)
except TypeError:
gamma = self.gamma
pen = []
for xi in x:
# CAVE: this does not work with already repaired values!!
# CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s
# remark: one deep copy can be prevented by xold = xi first
xpheno = gp.pheno(archive[xi]['geno'])
# necessary, because xi was repaired to be in bounds
xinbounds = self.repair(xpheno)
# could be omitted (with unpredictable effect in case of external repair)
fac = 1 # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
return pen[0] if x_is_single_vector else pen
# ____________________________________________________________
#
def feasible_ratio(self, solutions):
"""counts for each coordinate the number of feasible values in
``solutions`` and returns an array of length ``len(solutions[0])``
with the ratios.
`solutions` is a list or array of repaired ``Solution``
instances,
"""
raise NotImplementedError('Solution class disappeared')
count = np.zeros(len(solutions[0]))
for x in solutions:
count += x.unrepaired == x
return count / float(len(solutions))
# ____________________________________________________________
#
def update(self, function_values, es):
"""updates the weights for computing a boundary penalty.
Arguments
---------
`function_values`
all function values of recent population of solutions
`es`
`CMAEvolutionStrategy` object instance, in particular
mean and variances and the methods from the attribute
`gp` of type `GenoPheno` are used.
"""
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return self
N = es.N
# ## prepare
# compute varis = sigma**2 * C_ii
varis = es.sigma**2 * array(N * [es.C] if isscalar(es.C) else (# scalar case
es.C if isscalar(es.C[0]) else # diagonal matrix case
[es.C[i][i] for i in xrange(N)])) # full matrix case
# relative violation in geno-space
dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5
# ## Store/update a history of delta fitness value
fvals = sorted(function_values)
l = 1 + len(fvals)
val = fvals[3 * l // 4] - fvals[l // 4] # interquartile range (exact apart from interpolation)
val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration
# insert val in history
if isfinite(val) and val > 0:
self.hist.insert(0, val)
elif val == inf and len(self.hist) > 1:
self.hist.insert(0, max(self.hist))
else:
pass # ignore 0 or nan values
if len(self.hist) > 20 + (3 * N) / es.popsize:
self.hist.pop()
# ## prepare
dfit = np.median(self.hist) # median interquartile range
damp = min(1, es.sp.mueff / 10. / N)
# ## set/update weights
# Throw initialization error
if len(self.hist) == 0:
raise _Error('wrongful initialization, no feasible solution sampled. ' +
'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
# initialize weights
if dmean.any() and (not self.weights_initialized or es.countiter == 2): # TODO
self.gamma = array(N * [2 * dfit]) ## BUGBUGzzzz: N should be phenotypic (bounds are in phenotype), but is genotypic
self.weights_initialized = True
# update weights gamma
if self.weights_initialized:
edist = array(abs(dmean) - 3 * max(1, N**0.5 / es.sp.mueff))
if 1 < 3: # this is better, around a factor of two
# increase single weights possibly with a faster rate than they can decrease
# value unit of edist is std dev, 3==random walk of 9 steps
self.gamma *= exp((edist > 0) * np.tanh(edist / 3) / 2.)**damp
# decrease all weights up to the same level to avoid single extremely small weights
# use a constant factor for pseudo-keeping invariance
self.gamma[self.gamma > 5 * dfit] *= exp(-1. / 3)**damp
# self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]
# ## return penalty
# es.more_to_write = self.gamma if not isscalar(self.gamma) else N*[1]
return self # bound penalty values
# ____________________________________________________________
# ____________________________________________________________
#
class BoxConstraintsTransformationBase(object):
"""Implements a transformation into boundaries and is used for
boundary handling::
tf = BoxConstraintsTransformationAnyDerivedClass([[1, 4]])
x = [3, 2, 4.4]
y = tf(x) # "repaired" solution
print(tf([2.5])) # middle value is never changed
[2.5]
:See: ``BoundaryHandler``
"""
def __init__(self, bounds):
try:
if len(bounds[0]) != 2:
raise ValueError
except:
raise ValueError(' bounds must be either [[lb0, ub0]] or [[lb0, ub0], [lb1, ub1],...], \n where in both cases the last entry is reused for all remaining dimensions')
self.bounds = bounds
self.initialize()
def initialize(self):
"""initialize in base class"""
self._lb = [b[0] for b in self.bounds] # can be done more efficiently?
self._ub = [b[1] for b in self.bounds]
def _lowerupperval(self, a, b, c):
return np.max([np.max(a), np.min([np.min(b), c])])
def bounds_i(self, i):
"""return ``[ith_lower_bound, ith_upper_bound]``"""
return self.bounds[self._index(i)]
def __call__(self, solution_in_genotype):
res = [self._transform_i(x, i) for i, x in enumerate(solution_in_genotype)]
return res
transform = __call__
def inverse(self, solution_in_phenotype, copy_if_changed=True, copy_always=True):
return [self._inverse_i(y, i) for i, y in enumerate(solution_in_phenotype)]
def _index(self, i):
return min((i, len(self.bounds) - 1))
def _transform_i(self, x, i):
raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
def _inverse_i(self, y, i):
raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
def shift_or_mirror_into_invertible_domain(self, solution_genotype):
"""return the reference solution that has the same ``box_constraints_transformation(solution)``
value, i.e. ``tf.shift_or_mirror_into_invertible_domain(x) = tf.inverse(tf.transform(x))``.
This is an idempotent mapping (leading to the same result independently of how often it is
repeatedly applied).
"""
return self.inverse(self(solution_genotype))
raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
class _BoxConstraintsTransformationTemplate(BoxConstraintsTransformationBase):
"""copy/paste this template to implement a new boundary handling transformation"""
def __init__(self, bounds):
# BoxConstraintsTransformationBase.__init__(self, bounds)
super(_BoxConstraintsTransformationTemplate, self).__init__(bounds)
def initialize(self):
BoxConstraintsTransformationBase.initialize(self) # likely to be removed
def _transform_i(self, x, i):
raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
def _inverse_i(self, y, i):
raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
__doc__ = BoxConstraintsTransformationBase.__doc__ + __doc__
class BoxConstraintsLinQuadTransformation(BoxConstraintsTransformationBase):
"""implements a bijective, monotonous transformation between [lb - al, ub + au]
and [lb, ub] which is the identity (and therefore linear) in [lb + al, ub - au]
(typically about 90% of the interval) and quadratic in [lb - 3*al, lb + al]
and in [ub - au, ub + 3*au]. The transformation is periodically
expanded beyond the limits (somewhat resembling the shape sin(x-pi/2))
with a period of ``2 * (ub - lb + al + au)``.
Details
=======
Partly due to numerical considerations, the values ``al`` and ``au`` depend
on ``abs(lb)`` and ``abs(ub)``, which makes the transformation non-translation
invariant. In contrast to sin(.), the transformation is robust to "arbitrary"
values for boundaries, e.g. a lower bound of ``-1e99`` or ``np.Inf`` or
``None``.
Examples
========
Example to use with cma:
>>> import cma
>>> # only the first variable has an upper bound
>>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,None]]) # second==last pair is re-cycled
>>> cma.fmin(cma.felli, 9 * [2], 1, {'transformation': [tf.transform, tf.inverse], 'verb_disp': 0})
>>> # ...or...
>>> es = cma.CMAEvolutionStrategy(9 * [2], 1)
>>> while not es.stop():
... X = es.ask()
... f = [cma.felli(tf(x)) for x in X] # tf(x) == tf.transform(x)
... es.tell(X, f)
Example of the internal workings:
>>> import cma
>>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,11], [1,11]])
>>> tf.bounds
[[1, 2], [1, 11], [1, 11]]
>>> tf([1.5, 1.5, 1.5])
[1.5, 1.5, 1.5]
>>> tf([1.52, -2.2, -0.2, 2, 4, 10.4])
[1.52, 4.0, 2.0, 2.0, 4.0, 10.4]
>>> res = np.round(tf._au, 2)
>>> assert list(res[:4]) == [ 0.15, 0.6, 0.6, 0.6]
>>> res = [round(x, 2) for x in tf.shift_or_mirror_into_invertible_domain([1.52, -12.2, -0.2, 2, 4, 10.4])]
>>> assert res == [1.52, 9.2, 2.0, 2.0, 4.0, 10.4]
>>> tmp = tf([1]) # call with lower dimension
"""
def __init__(self, bounds):
"""``x`` is defined in ``[lb - 3*al, ub + au + r - 2*al]`` with ``r = ub - lb + al + au``,
and ``x == transformation(x)`` in ``[lb + al, ub - au]``.
``beta*x - alphal = beta*x - alphau`` is then defined in ``[lb, ub]``,
``alphal`` and ``alphau`` represent the same value, but respectively numerically
better suited for values close to lb and ub.
"""
# BoxConstraintsTransformationBase.__init__(self, bounds)
super(BoxConstraintsLinQuadTransformation, self).__init__(bounds)
# super().__init__(bounds) # only available since Python 3.x
# super(BB, self).__init__(bounds) # is supposed to call initialize
def initialize(self, length=None):
"""see ``__init__``"""
if length is None:
length = len(self.bounds)
max_i = min((len(self.bounds) - 1, length - 1))
self._lb = array([self.bounds[min((i, max_i))][0]
if self.bounds[min((i, max_i))][0] is not None
else -np.Inf
for i in xrange(length)], copy=False)
self._ub = array([self.bounds[min((i, max_i))][1]
if self.bounds[min((i, max_i))][1] is not None
else np.Inf
for i in xrange(length)], copy=False)
lb = self._lb
ub = self._ub
# define added values for lower and upper bound
self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
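# Worked example (added for illustration; not part of the original code):
# for a single variable with bounds [0, 1] the margins defined above are
#     al = min((ub - lb) / 2, (1 + abs(lb)) / 20) = min(0.5, 0.05) = 0.05
#     au = min((ub - lb) / 2, (1 + abs(ub)) / 20) = min(0.5, 0.10) = 0.10
# so the transformation is the identity on [lb + al, ub - au] = [0.05, 0.9]
# and quadratic in the margins next to the bounds.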
def __call__(self, solution_genotype, copy_if_changed=True, copy_always=False):
# about four times faster version of array([self._transform_i(x, i) for i, x in enumerate(solution_genotype)])
# still, this makes a typical run on a test function two times slower, but there might be one too many copies
# during the transformations in gp
if len(self._lb) != len(solution_genotype):
self.initialize(len(solution_genotype))
lb = self._lb
ub = self._ub
al = self._al
au = self._au
if copy_always or not isinstance(solution_genotype[0], float):
# transformed value is likely to be a float
y = np.array(solution_genotype, copy=True, dtype=float)
# if solution_genotype is not a float, copy value is disregarded
copy = False
else:
y = solution_genotype
copy = copy_if_changed
idx = (y < lb - 2 * al - (ub - lb) / 2.0) | (y > ub + 2 * au + (ub - lb) / 2.0)
if idx.any():
r = 2 * (ub[idx] - lb[idx] + al[idx] + au[idx]) # period
s = lb[idx] - 2 * al[idx] - (ub[idx] - lb[idx]) / 2.0 # start
if copy:
y = np.array(y, copy=True)
copy = False
y[idx] -= r * ((y[idx] - s) // r) # shift
idx = y > ub + au
if idx.any():
if copy:
y = np.array(y, copy=True)
copy = False
y[idx] -= 2 * (y[idx] - ub[idx] - au[idx])
idx = y < lb - al
if idx.any():
if copy:
y = np.array(y, copy=True)
copy = False
y[idx] += 2 * (lb[idx] - al[idx] - y[idx])
idx = y < lb + al
if idx.any():
if copy:
y = np.array(y, copy=True)
copy = False
y[idx] = lb[idx] + (y[idx] - (lb[idx] - al[idx]))**2 / 4 / al[idx]
idx = y > ub - au
if idx.any():
if copy:
y = np.array(y, copy=True)
copy = False
y[idx] = ub[idx] - (y[idx] - (ub[idx] + au[idx]))**2 / 4 / au[idx]
# assert Mh.vequals_approximately(y, BoxConstraintsTransformationBase.__call__(self, solution_genotype))
return y
__call__.__doc__ = BoxConstraintsTransformationBase.__doc__
transform = __call__
def idx_infeasible(self, solution_genotype):
"""return indices of "infeasible" variables, that is,
variables that do not directly map into the feasible domain such that
``tf.inverse(tf(x)) == x``.
"""
res = [i for i, x in enumerate(solution_genotype)
if not self.is_feasible_i(x, i)]
return res
def is_feasible_i(self, x, i):
"""return True if value ``x`` is in the invertible domain of
variable ``i``
"""
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
return lb - al < x < ub + au
def is_loosely_feasible_i(self, x, i):
"""never used"""
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
return lb - 2 * al - (ub - lb) / 2.0 <= x <= ub + 2 * au + (ub - lb) / 2.0
def shift_or_mirror_into_invertible_domain(self, solution_genotype,
copy=False):
"""Details: input ``solution_genotype`` is changed. The domain is
[lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
mirroring is applied.
"""
assert solution_genotype is not None
if copy:
y = [val for val in solution_genotype]
else:
y = solution_genotype
if isinstance(y, np.ndarray) and not isinstance(y[0], float):
y = array(y, dtype=float)
for i in rglen(y):
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
# x is far from the boundary, compared to ub - lb
if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
r = 2 * (ub - lb + al + au) # period
s = lb - 2 * al - (ub - lb) / 2.0 # start
y[i] -= r * ((y[i] - s) // r) # shift
if y[i] > ub + au:
y[i] -= 2 * (y[i] - ub - au)
if y[i] < lb - al:
y[i] += 2 * (lb - al - y[i])
return y
shift_or_mirror_into_invertible_domain.__doc__ = BoxConstraintsTransformationBase.shift_or_mirror_into_invertible_domain.__doc__ + shift_or_mirror_into_invertible_domain.__doc__
def _shift_or_mirror_into_invertible_i(self, x, i):
"""shift into the invertible domain [lb - ab, ub + au], mirror close to
boundaries in order to get a smooth transformation everywhere
"""
assert x is not None
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
# x is far from the boundary, compared to ub - lb
if x < lb - 2 * al - (ub - lb) / 2.0 or x > ub + 2 * au + (ub - lb) / 2.0:
r = 2 * (ub - lb + al + au) # period
s = lb - 2 * al - (ub - lb) / 2.0 # start
x -= r * ((x - s) // r) # shift
if x > ub + au:
x -= 2 * (x - ub - au)
if x < lb - al:
x += 2 * (lb - al - x)
return x
def _transform_i(self, x, i):
"""return transform of x in component i"""
x = self._shift_or_mirror_into_invertible_i(x, i)
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
if x < lb + al:
return lb + (x - (lb - al))**2 / 4 / al
elif x < ub - au:
return x
elif x < ub + 3 * au:
return ub - (x - (ub + au))**2 / 4 / au
else:
assert False # shift removes this case
return ub + au - (x - (ub + au))
def _inverse_i(self, y, i):
"""return inverse of y in component i"""
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
if 1 < 3:
if not lb <= y <= ub:
raise ValueError('argument of inverse must be within the given bounds')
if y < lb + al:
return (lb - al) + 2 * (al * (y - lb))**0.5
elif y < ub - au:
return y
else:
return (ub + au) - 2 * (au * (ub - y))**0.5
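# Minimal usage sketch (illustrative only, mirroring the doctests above):
#     tf = BoxConstraintsLinQuadTransformation([[0, 1]])
#     y = tf.transform([0.02, 0.5, 1.3])   # maps every value into [0, 1]
#     x = tf.inverse(y)                    # back into the invertible domain
# Afterwards ``tf.transform(x) == y`` holds again, whereas the original input
# is only recovered if it already lay in the invertible domain [lb - al, ub + au].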
class GenoPheno(object):
"""Genotype-phenotype transformation.
Method `pheno` provides the transformation from geno- to phenotype,
that is from the internal representation to the representation used
in the objective function. Method `geno` provides the "inverse" pheno-
to genotype transformation. The geno-phenotype transformation comprises,
in this order:
- insert fixed variables (with the phenotypic and therefore quite
possibly "wrong" values)
- affine linear transformation (first scaling then shift)
- user-defined transformation
- repair (e.g. into feasible domain due to boundaries)
- assign fixed variables their original phenotypic value
By default all transformations are the identity. The repair is only applied
if the transformation is given as argument to the method `pheno`.
``geno`` is only necessary if solutions have been injected.
"""
def __init__(self, dim, scaling=None, typical_x=None,
fixed_values=None, tf=None):
"""return `GenoPheno` instance with phenotypic dimension `dim`.
Keyword Arguments
-----------------
`scaling`
the diagonal of a scaling transformation matrix, multipliers
in the genotype-phenotype transformation, see `typical_x`
`typical_x`
``pheno = scaling*geno + typical_x``
`fixed_values`
a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``,
that are not subject to change, negative indices are ignored
(they act like commenting out the index), values are phenotypic
values.
`tf`
list of two user-defined transformation functions, or `None`.
``tf[0]`` is a function that transforms the internal representation
as used by the optimizer into a solution as used by the
objective function. ``tf[1]`` does the back-transformation.
For example::
tf_0 = lambda x: [xi**2 for xi in x]
tf_1 = lambda x: [abs(xi)**0.5 for xi in x]
or "equivalently" without the `lambda` construct::
def tf_0(x):
return [xi**2 for xi in x]
def tf_1(x):
return [abs(xi)**0.5 for xi in x]
``tf=[tf_0, tf_1]`` is a reasonable way to guarantee that only positive
values are used in the objective function.
Details
-------
If ``tf_0`` is not the identity and ``tf_1`` is omitted,
the genotype of ``x0`` cannot be computed consistently and
"injection" of phenotypic solutions is likely to lead to
unexpected results.
"""
self.N = dim
self.fixed_values = fixed_values
if tf is not None:
self.tf_pheno = tf[0]
self.tf_geno = tf[1] # TODO: should not necessarily be needed
# r = np.random.randn(dim)
# assert all(tf[0](tf[1](r)) - r < 1e-7)
# r = np.random.randn(dim)
# assert all(tf[0](tf[1](r)) - r > -1e-7)
_print_warning("in class GenoPheno: user defined transformations have not been tested thoroughly")
else:
self.tf_geno = None
self.tf_pheno = None
if fixed_values:
if not isinstance(fixed_values, dict):
raise _Error("fixed_values must be a dictionary {index:value,...}")
if max(fixed_values.keys()) >= dim:
raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) +
" >= dim=N=" + str(dim) + " is not a feasible index")
# convenience commenting functionality: drop negative keys
for k in list(fixed_values.keys()):
if k < 0:
fixed_values.pop(k)
def vec_is_default(vec, default_val=0):
"""return True if `vec` has the value `default_val`,
None or [None] are also recognized as default
"""
# TODO: rather let default_val be a list of default values,
# cave comparison of arrays
try:
if len(vec) == 1:
vec = vec[0] # [None] becomes None and is always default
except TypeError:
pass # vec is a scalar
if vec is None or all(vec == default_val):
return True
if all([val is None or val == default_val for val in vec]):
return True
return False
self.scales = array(scaling) if scaling is not None else None
if vec_is_default(self.scales, 1):
self.scales = 1 # CAVE: 1 is not array(1)
elif self.scales.shape is not () and len(self.scales) != self.N:
raise _Error('len(scales) == ' + str(len(self.scales)) +
' does not match dimension N == ' + str(self.N))
self.typical_x = array(typical_x) if typical_x is not None else None
if vec_is_default(self.typical_x, 0):
self.typical_x = 0
elif self.typical_x.shape is not () and len(self.typical_x) != self.N:
raise _Error('len(typical_x) == ' + str(len(self.typical_x)) +
' does not match dimension N == ' + str(self.N))
if (self.scales is 1 and
self.typical_x is 0 and
self.fixed_values is None and
self.tf_pheno is None):
self.isidentity = True
else:
self.isidentity = False
if self.tf_pheno is None:
self.islinear = True
else:
self.islinear = False
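# Usage sketch (added for illustration; not part of the original code):
#     gp = GenoPheno(3, scaling=[1., 10., 100.], typical_x=[0., 1., 2.])
#     gp.pheno([1., 1., 1.])           # -> scaling * geno + typical_x,
#                                      #    i.e. array([ 1., 11., 102.])
#     gp.geno(gp.pheno([1., 1., 1.]))  # recovers the genotype [1., 1., 1.]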
def pheno(self, x, into_bounds=None, copy=True, copy_always=False,
archive=None, iteration=None):
"""maps the genotypic input argument into the phenotypic space,
see help for class `GenoPheno`
Details
-------
If ``copy``, values from ``x`` are copied if changed under the transformation.
"""
# TODO: copy_always seems superfluous, as it could be done in the calling code
input_type = type(x)
if into_bounds is None:
into_bounds = (lambda x, copy=False:
x if not copy else array(x, copy=copy))
if copy_always and not copy:
raise ValueError('arguments copy_always=' + str(copy_always) +
' and copy=' + str(copy) + ' have inconsistent values')
if copy_always:
x = array(x, copy=True)
copy = False
if self.isidentity:
y = into_bounds(x) # was into_bounds(x, False) before (bug before v0.96.22)
else:
if self.fixed_values is None:
y = array(x, copy=copy) # make a copy, in case
else: # expand with fixed values
y = list(x) # is a copy
for i in sorted(self.fixed_values.keys()):
y.insert(i, self.fixed_values[i])
y = array(y, copy=False)
copy = False
if self.scales is not 1: # just for efficiency
y *= self.scales
if self.typical_x is not 0:
y += self.typical_x
if self.tf_pheno is not None:
y = array(self.tf_pheno(y), copy=False)
y = into_bounds(y, copy) # copy is False
if self.fixed_values is not None:
for i, k in list(self.fixed_values.items()):
y[i] = k
if input_type is np.ndarray:
y = array(y, copy=False)
if archive is not None:
archive.insert(y, geno=x, iteration=iteration)
return y
def geno(self, y, from_bounds=None,
copy_if_changed=True, copy_always=False,
repair=None, archive=None):
"""maps the phenotypic input argument into the genotypic space,
that is, computes essentially the inverse of ``pheno``.
By default a copy is made only to avoid modifying ``y``.
The inverse of the user-defined transformation (if any)
is only needed if external solutions are injected; it is not
applied to the initial solution x0.
Details
=======
``geno`` searches first in ``archive`` for the genotype of
``y`` and returns the found value, typically unrepaired.
Otherwise, first ``from_bounds`` is applied, to revert a
projection into the bound domain (if necessary) and ``pheno``
is reverted. ``repair`` is applied last, and is usually the
method ``CMAEvolutionStrategy.repair_genotype`` that limits the
Mahalanobis norm of ``geno(y) - mean``.
"""
if from_bounds is None:
from_bounds = lambda x, copy=False: x # not change, no copy
if archive is not None:
try:
x = archive[y]['geno']
except (KeyError, TypeError):
x = None
if x is not None:
if archive[y]['iteration'] < archive.last_iteration \
and repair is not None:
x = repair(x, copy_if_changed=copy_always)
return x
input_type = type(y)
x = y
if copy_always:
x = array(y, copy=True)
copy = False
else:
copy = copy_if_changed
x = from_bounds(x, copy)
if self.isidentity:
if repair is not None:
x = repair(x, copy)
return x
if copy: # could be improved?
x = array(x, copy=True)
copy = False
# user-defined transformation
if self.tf_geno is not None:
x = array(self.tf_geno(x), copy=False)
elif self.tf_pheno is not None:
raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0')
# affine-linear transformation: shift and scaling
if self.typical_x is not 0:
x -= self.typical_x
if self.scales is not 1: # just for efficiency
x /= self.scales
# kick out fixed_values
if self.fixed_values is not None:
# keeping the transformed values does not help much
# therefore it is omitted
if 1 < 3:
keys = sorted(self.fixed_values.keys())
x = array([x[i] for i in xrange(len(x)) if i not in keys],
copy=False)
# repair injected solutions
if repair is not None:
x = repair(x, copy)
if input_type is np.ndarray:
x = array(x, copy=False)
return x
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
class OOOptimizer(object):
""""abstract" base class for an Object Oriented Optimizer interface.
Relevant methods are `__init__`, `ask`, `tell`, `stop`, `result`,
and `optimize`. Only `optimize` is fully implemented in this base
class.
Examples
--------
All examples minimize the function `elli`, the output is not shown.
(A preferred environment to execute all examples is ``ipython`` in
``%pylab`` mode.)
First we need::
from cma import CMAEvolutionStrategy
# CMAEvolutionStrategy derives from the OOOptimizer class
felli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x)))
The shortest example uses the inherited method
`OOOptimizer.optimize()`::
es = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(felli)
The input parameters to `CMAEvolutionStrategy` are specific to this
inherited class. The remaining functionality is based on interface
defined by `OOOptimizer`. We might have a look at the result::
print(es.result()[0]) # best solution and
print(es.result()[1]) # its function value
In order to display more exciting output we do::
es.logger.plot() # if matplotlib is available
Virtually the same example can be written with an explicit loop
instead of using `optimize()`. This gives the necessary insight into
the `OOOptimizer` class interface and entire control over the
iteration loop::
optim = CMAEvolutionStrategy(9 * [0.5], 0.3)
# a new CMAEvolutionStrategy instance
# this loop resembles optimize()
while not optim.stop(): # iterate
X = optim.ask() # get candidate solutions
f = [felli(x) for x in X] # evaluate solutions
# in case do something else that needs to be done
optim.tell(X, f) # do all the real "update" work
optim.disp(20) # display info every 20th iteration
optim.logger.add() # log another "data line"
# final output
print('termination by', optim.stop())
print('best f-value =', optim.result()[1])
print('best solution =', optim.result()[0])
optim.logger.plot() # if matplotlib is available
Details
-------
Most of the work is done in the method `tell(...)`. The method
`result()` returns more useful output.
"""
def __init__(self, xstart, **more_args):
"""``xstart`` is a mandatory argument"""
self.xstart = xstart
self.more_args = more_args
self.initialize()
def initialize(self):
"""(re-)set to the initial state"""
self.countiter = 0
self.xcurrent = self.xstart[:]
raise NotImplementedError('method initialize() must be implemented in derived class')
def ask(self, gradf=None, **more_args):
"""abstract method, AKA "get" or "sample_distribution", deliver
new candidate solution(s), a list of "vectors"
"""
raise NotImplementedError('method ask() must be implemented in derived class')
def tell(self, solutions, function_values):
"""abstract method, AKA "update", pass f-values and prepare for
next iteration
"""
self.countiter += 1
raise NotImplementedError('method tell() must be implemented in derived class')
def stop(self):
"""abstract method, return satisfied termination conditions in
a dictionary like ``{'termination reason': value, ...}``,
for example ``{'tolfun': 1e-12}``, or the empty dictionary ``{}``.
The implementation of `stop()` should prevent an infinite
loop.
"""
raise NotImplementedError('method stop() is not implemented')
def disp(self, modulo=None):
"""abstract method, display some iteration infos if
``self.iteration_counter % modulo == 0``
"""
pass # raise NotImplementedError('method disp() is not implemented')
def result(self):
"""abstract method, return ``(x, f(x), ...)``, that is, the
minimizer, its function value, ...
"""
raise NotImplementedError('method result() is not implemented')
# previous ordering:
# def optimize(self, objectivefct,
# logger=None, verb_disp=20,
# iterations=None, min_iterations=1,
# call_back=None):
def optimize(self, objective_fct, iterations=None, min_iterations=1,
args=(), verb_disp=None, logger=None, call_back=None):
"""find minimizer of `objective_fct`.
CAVEAT: the return value for `optimize` has changed to ``self``.
Arguments
---------
`objective_fct`
function to be minimized
`iterations`
number of (maximal) iterations, while ``not self.stop()``
`min_iterations`
minimal number of iterations, even if ``not self.stop()``
`args`
arguments passed to `objective_fct`
`verb_disp`
print to screen every `verb_disp` iteration, if ``None``
the value from ``self.logger`` is "inherited", if
available.
``logger``
a `BaseDataLogger` instance, which must be compatible
with the type of ``self``.
``call_back``
call back function called like ``call_back(self)`` or
a list of call back functions.
``return self``, that is, the `OOOptimizer` instance.
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5
... ).optimize(cma.fcts.rosen, verb_disp=100)
(4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0
2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0
3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0
100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3
200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5
300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8
400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
('termination by', {'tolfun': 1e-11})
('best f-value =', 1.1189867885201275e-14)
('solution =', array([ 1. , 1. , 1. , 0.99999999, 0.99999998,
0.99999996, 0.99999992]))
>>> print(es.result()[0])
array([ 1. 1. 1. 0.99999999 0.99999998 0.99999996
0.99999992])
"""
assert iterations is None or min_iterations <= iterations
if not hasattr(self, 'logger'):
self.logger = logger
logger = self.logger = logger or self.logger
if not isinstance(call_back, list):
call_back = [call_back]
citer = 0
while not self.stop() or citer < min_iterations:
if iterations is not None and citer >= iterations:
return self.result()
citer += 1
X = self.ask() # deliver candidate solutions
fitvals = [objective_fct(x, *args) for x in X]
self.tell(X, fitvals) # all the work is done here
self.disp(verb_disp)
for f in call_back:
f is None or f(self)
logger.add(self) if logger else None
# signal logger that we left the loop
# TODO: this is very ugly, because it assumes modulo keyword
# argument *and* modulo attribute to be available
try:
logger.add(self, modulo=bool(logger.modulo)) if logger else None
except TypeError:
print(' suppressing the final call of the logger in ' +
'OOOptimizer.optimize (modulo keyword parameter not ' +
'available)')
except AttributeError:
print(' suppressing the final call of the logger in ' +
'OOOptimizer.optimize (modulo attribute not ' +
'available)')
if verb_disp:
self.disp(1)
if verb_disp in (1, True):
print('termination by', self.stop())
print('best f-value =', self.result()[1])
print('solution =', self.result()[0])
return self
# was: return self.result() + (self.stop(), self, logger)
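# Minimal subclass sketch (illustrative only; the names below are hypothetical
# and not part of the cma module): a pure random search that implements the
# abstract interface, so the inherited ``optimize`` loop above works as-is.
#
#     class PureRandomSearch(OOOptimizer):
#         def initialize(self):
#             self.countiter = 0
#             self.best_x, self.best_f = None, np.inf
#         def ask(self):
#             return [self.xstart + np.random.randn(len(self.xstart))
#                     for _ in range(10)]
#         def tell(self, solutions, function_values):
#             self.countiter += 1
#             i = np.argmin(function_values)
#             if function_values[i] < self.best_f:
#                 self.best_x, self.best_f = solutions[i], function_values[i]
#         def stop(self):
#             return {'maxiter': 100} if self.countiter >= 100 else {}
#         def result(self):
#             return (self.best_x, self.best_f)
#
#     PureRandomSearch(5 * [0.]).optimize(lambda x: sum(xi**2 for xi in x))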
_experimental = False
class CMAAdaptSigmaBase(object):
"""step-size adaptation base class, implementing hsig functionality
via an isotropic evolution path.
"""
def __init__(self, *args, **kwargs):
self.is_initialized_base = False
self._ps_updated_iteration = -1
def initialize_base(self, es):
"""set parameters and state variable based on dimension,
mueff and possibly further options.
"""
## meta_parameters.cs_exponent == 1.0
b = 1.0
## meta_parameters.cs_multiplier == 1.0
self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N**b + (es.sp.mueff + 3)**b)
self.ps = np.zeros(es.N)
self.is_initialized_base = True
return self
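# Worked example (added for illustration): for N = 10 and mueff = 5 the
# learning rate above becomes cs = (5 + 2) / (10 + (5 + 3)) = 7/18, i.e.
# about 0.39, and ps starts as the zero vector.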
def _update_ps(self, es):
"""update the isotropic evolution path
:type es: CMAEvolutionStrategy
"""
if not self.is_initialized_base:
self.initialize_base(es)
if self._ps_updated_iteration == es.countiter:
return
if es.countiter <= es.itereigenupdated:
# es.B and es.D must/should be those from the last iteration
assert es.countiter >= es.itereigenupdated
_print_warning('distribution transformation (B and D) have been updated before ps could be computed',
'_update_ps', 'CMAAdaptSigmaBase')
z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
self._ps_updated_iteration = es.countiter
def hsig(self, es):
"""return "OK-signal" for rank-one update, `True` (OK) or `False`
(stall rank-one update), based on the length of an evolution path
"""
self._update_ps(es)
if self.ps is None:
return True
squared_sum = sum(self.ps**2) / (1 - (1 - self.cs)**(2 * es.countiter))
# correction with self.countiter seems not necessary,
# as pc also starts with zero
return squared_sum / es.N - 1 < 1 + 4. / (es.N + 1)
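# Added note: the test above stalls the rank-one update whenever the
# bias-corrected squared path length exceeds its expectation under random
# selection, i.e. roughly when ||ps||**2 / N >= 2 + 4 / (N + 1).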
def update(self, es, **kwargs):
"""update ``es.sigma``"""
self._update_ps(es)
raise NotImplementedError('must be implemented in a derived class')
class CMAAdaptSigmaNone(CMAAdaptSigmaBase):
def update(self, es, **kwargs):
"""no update, ``es.sigma`` remains constant.
:param es: ``CMAEvolutionStrategy`` class instance
:param kwargs: whatever else is needed to update ``es.sigma``
"""
pass
class CMAAdaptSigmaDistanceProportional(CMAAdaptSigmaBase):
"""artificial setting of ``sigma`` for test purposes, e.g.
to simulate optimal progress rates.
"""
def __init__(self, coefficient=1.2):
super(CMAAdaptSigmaDistanceProportional, self).__init__() # base class provides method hsig()
self.coefficient = coefficient
self.is_initialized = True
def update(self, es, **kwargs):
# set sigma proportional to the distance of the mean to the optimum (assumed at zero), simulating optimal progress
es.sigma = self.coefficient * es.sp.mueff * sum(es.mean**2)**0.5 / es.N / es.sp.cmean
class CMAAdaptSigmaCSA(CMAAdaptSigmaBase):
def __init__(self):
"""postpone initialization to a method call where dimension and mueff should be known.
"""
self.is_initialized = False
def initialize(self, es):
"""set parameters and state variable based on dimension,
mueff and possibly further options.
"""
self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False
if es.opts['CSA_clip_length_value'] is not None:
try:
if len(es.opts['CSA_clip_length_value']) == 0:
es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf]
elif len(es.opts['CSA_clip_length_value']) == 1:
es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]]
elif len(es.opts['CSA_clip_length_value']) == 2:
es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value'])
else:
raise ValueError('option CSA_clip_length_value must be a number or a sequence of length 1 or 2')
except TypeError: # len(...) failed
es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']]
es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value']))
if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0:
raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number')
## meta_parameters.cs_exponent == 1.0
b = 1.0
## meta_parameters.cs_multiplier == 1.0
self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N + (es.sp.mueff + 3)**b)
self.damps = es.opts['CSA_dampfac'] * (0.5 +
0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 +
2 * max([0, ((es.sp.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) +
self.cs
)
self.max_delta_log_sigma = 1 # in symmetric use (strict lower bound is -cs/damps anyway)
if self.disregard_length_setting:
es.opts['CSA_clip_length_value'] = [0, 0]
## meta_parameters.cs_exponent == 1.0
b = 1.0 * 0.5
## meta_parameters.cs_multiplier == 1.0
self.cs = 1.0 * (es.sp.mueff + 1)**b / (es.N**b + 2 * es.sp.mueff**b)
self.damps = es.opts['CSA_dampfac'] * 1 # * (1.1 - 1/(es.N+1)**0.5)
if es.opts['verbose'] > 1 or self.disregard_length_setting or 11 < 3:
print('SigmaCSA Parameters')
for k, v in self.__dict__.items():
print(' ', k, ':', v)
self.ps = np.zeros(es.N)
self._ps_updated_iteration = -1
self.is_initialized = True
def _update_ps(self, es):
if not self.is_initialized:
self.initialize(es)
if self._ps_updated_iteration == es.countiter:
return
z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
if es.opts['CSA_clip_length_value'] is not None:
vals = es.opts['CSA_clip_length_value']
min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)
max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)
act_len = sum(z**2)**0.5
new_len = Mh.minmax(act_len, min_len, max_len)
if new_len != act_len:
z *= new_len / act_len
# z *= (es.N / sum(z**2))**0.5 # ==> sum(z**2) == es.N
# z *= es.const.chiN / sum(z**2)**0.5
self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
self._ps_updated_iteration = es.countiter
def update(self, es, **kwargs):
self._update_ps(es) # caveat: if es.B or es.D are already updated and ps is not, this goes wrong!
if es.opts['CSA_squared']:
s = (sum(self.ps**2) / es.N - 1) / 2
# sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed
# divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1
else:
s = sum(self.ps**2)**0.5 / es.const.chiN - 1
if es.opts['vv'] == 'pc for ps':
s = sum((es.D**-1 * dot(es.B.T, es.pc))**2)**0.5 / es.const.chiN - 1
s = (sum((es.D**-1 * dot(es.B.T, es.pc))**2) / es.N - 1) / 2
s *= self.cs / self.damps
s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)
es.sigma *= np.exp(s_clipped)
# "error" handling
if s_clipped != s:
_print_warning('sigma change exp(' + str(s) + ') = ' + str(np.exp(s)) +
' clipped to exp(+-' + str(self.max_delta_log_sigma) + ')',
'update',
'CMAAdaptSigmaCSA',
es.countiter, es.opts['verbose'])
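# Added note: in the default (non-squared) branch the update above amounts to
# the usual CSA rule, sketched here in pseudo-numpy:
#     sigma *= np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
# with the log-change clipped to +- max_delta_log_sigma per iteration.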
class CMAAdaptSigmaMedianImprovement(CMAAdaptSigmaBase):
"""Compares median fitness against a fitness percentile of the previous iteration,
see Ait ElHara et al, GECCO 2013.
"""
def __init__(self):
# CMAAdaptSigmaBase.__init__(self)
super(CMAAdaptSigmaMedianImprovement, self).__init__() # base class provides method hsig()
def initialize(self, es):
r = es.sp.mueff / es.popsize
self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / log(es.N + 9)**2) * (es.popsize) # TODO
self.index_to_compare = (0.30 if not es.opts['vv']
else es.opts['vv']) * es.popsize # TODO
self.damp = 2 - 2 / es.N # sign-rule: 2
self.c = 0.3 # sign-rule needs <= 0.3
self.s = 0 # averaged statistics, usually between -1 and +1
def update(self, es, **kwargs):
if es.countiter < 2:
self.initialize(es)
self.fit = es.fit.fit
else:
ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
pt2 = self.index_to_compare - int(self.index_to_compare)
# ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2 # not in use
s = 0
if 1 < 3:
s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
s -= es.popsize / 2.
s *= 2. / es.popsize # the range was popsize, is 2
self.s = (1 - self.c) * self.s + self.c * s
es.sigma *= exp(self.s / self.damp)
# es.more_to_write.append(10**(self.s))
#es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
# # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
# # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
self.fit = es.fit.fit
class CMAAdaptSigmaTPA(CMAAdaptSigmaBase):
"""two point adaptation for step-size sigma. Relies on a specific
sampling of the first two offspring, whose objective function
value ranks are used to decide on the step-size change.
Example
=======
>>> import cma
>>> cma.CMAOptions('adapt').pprint()
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.1, {'AdaptSigma': cma.CMAAdaptSigmaTPA, 'ftarget': 1e-8})
>>> es.optimize(cma.fcts.rosen)
>>> assert 'ftarget' in es.stop()
>>> assert es.result()[1] <= 1e-8
>>> assert es.result()[2] < 6500 # typically < 5500
References: loosely based on Hansen 2008, CMA-ES with Two-Point
Step-Size Adaptation, more tightly based on an upcoming paper by
Hansen et al.
"""
def __init__(self, dimension=None, opts=None):
super(CMAAdaptSigmaTPA, self).__init__() # base class provides method hsig()
# CMAAdaptSigmaBase.__init__(self)
self.initialized = False
self.dimension = dimension
self.opts = opts
def initialize(self, N=None, opts=None):
if N is None:
N = self.dimension
if opts is None:
opts = self.opts
try:
damp_fac = opts['CSA_dampfac'] # should be renamed to sigma_adapt_dampfac or something
except (TypeError, KeyError):
damp_fac = 1
self.sp = _BlancClass() # just a container to have sp.name instead of sp['name'] to access parameters
try:
self.sp.damp = damp_fac * eval('N')**0.5 # why do we need 10 <-> exp(1/10) == 1.1? 2 should be fine!?
# self.sp.damp = damp_fac * (4 - 3.6/eval('N')**0.5)
except:
self.sp.damp = 4 # - 3.6 / N**0.5 # should become new default
_print_warning("dimension not known, damping set to 4",
'initialize', 'CMAAdaptSigmaTPA')
try:
if opts['vv'][0] == 'TPA_damp':
self.sp.damp = opts['vv'][1]
print('damp set to %d' % self.sp.damp)
except (TypeError):
pass
self.sp.dampup = 0.5**0.0 * 1.0 * self.sp.damp # 0.5 fails to converge on the Rastrigin function
self.sp.dampdown = 2.0**0.0 * self.sp.damp
if self.sp.dampup != self.sp.dampdown:
print('TPA damping is asymmetric')
self.sp.c = 0.3 # rank difference is asymmetric and therefore the switch from increase to decrease takes too long
self.sp.z_exponent = 0.5 # sign(z) * abs(z)**z_exponent, 0.5 seems better with larger popsize, 1 was default
self.sp.sigma_fac = 1.0 # (obsolete) 0.5 feels better, but no evidence whether it is
self.sp.relative_to_delta_mean = True # (obsolete)
self.s = 0 # the state variable
self.last = None
self.initialized = True
return self
def update(self, es, function_values, **kwargs):
"""the first and second value in ``function_values``
must reflect two mirrored solutions sampled
in direction / in opposite direction of
the previous mean shift, respectively.
"""
# TODO: on the linear function, the two mirrored samples lead
# to a sharp increase of condition of the covariance matrix.
# They should not be used to update the covariance matrix,
# if the step-size increases quickly. This should be fine with
# negative updates though.
if not self.initialized:
self.initialize(es.N, es.opts)
if 1 < 3:
# use the ranking difference of the mirrors for adaptation
# damp = 5 should be fine
z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
z /= es.popsize - 1 # z in [-1, 1]
self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
if self.s > 0:
es.sigma *= exp(self.s / self.sp.dampup)
else:
es.sigma *= exp(self.s / self.sp.dampdown)
#es.more_to_write.append(10**z)
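# Added summary of the TPA update above: the normalized rank difference
# z in [-1, 1] of the two mirrored offspring acts as success indicator,
#     s = (1 - c) * s + c * sign(z) * abs(z)**0.5
#     sigma *= exp(s / damp)
# where damp is dampup for s > 0 and dampdown otherwise.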
new_injections = True
# ____________________________________________________________
# ____________________________________________________________
#
class CMAEvolutionStrategy(OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
Calling Sequences
=================
es = CMAEvolutionStrategy(x0, sigma0)
es = CMAEvolutionStrategy(x0, sigma0, opts)
es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)
res = CMAEvolutionStrategy(x0, sigma0,
opts).optimize(objective_fct).result()
Arguments
=========
`x0`
initial solution, starting point. `x0` is given as "phenotype"
which means, if::
opts = {'transformation': [transform, inverse]}
is given and ``inverse is None``, the initial mean is not
consistent with `x0` in that ``transform(mean)`` does not
equal `x0` unless ``transform(mean)`` equals ``mean``.
`sigma0`
initial standard deviation. The problem variables should
have been scaled, such that a single standard deviation
on all variables is useful and the optimum is expected to
lie within about `x0` +- ``3*sigma0``. See also options
`scaling_of_variables`. Often one wants to check for
solutions close to the initial point. This allows,
for example, for an easier check of consistency of the
objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `CMAOptions`.
Main interface / usage
======================
The interface is inherited from the generic `OOOptimizer`
class (see also there). An object instance is generated from
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)
The least verbose interface is via the optimize method::
es.optimize(objective_func)
res = es.result()
More verbosely, the optimization is done using the
methods ``stop``, ``ask``, and ``tell``::
while not es.stop():
solutions = es.ask()
es.tell(solutions, [cma.fcts.rosen(s) for s in solutions])
es.disp()
es.result_pretty()
where ``ask`` delivers new candidate solutions and ``tell`` updates
the ``optim`` instance by passing the respective function values
(the objective function ``cma.fcts.rosen`` can be replaced by any
properly defined objective function, see ``cma.fcts`` for more
examples).
To change an option, for example a termination condition to
continue the optimization, call
es.opts.set({'tolfacupx': 1e4})
The class `CMAEvolutionStrategy` also provides::
(solutions, func_values) = es.ask_and_eval(objective_func)
and an entire optimization can also be written like::
while not es.stop():
es.tell(*es.ask_and_eval(objective_func))
Besides for termination criteria, in CMA-ES only the ranks of the
`func_values` are relevant.
Attributes and Properties
=========================
- `inputargs` -- passed input arguments
- `inopts` -- passed options
- `opts` -- actually used options, some of them can be changed any
time via ``opts.set``, see class `CMAOptions`
- `popsize` -- population size lambda, number of candidate
solutions returned by `ask()`
- `logger` -- a `CMADataLogger` instance utilized by `optimize`
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234)
>>>
>>> # optimize the ellipsoid function
>>> es.optimize(cma.fcts.elli, verb_disp=1)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 2.093015112685775e+04 1.0e+00 9.27e-01 9e-01 9e-01 0:0.0
2 16 4.964814235917688e+04 1.1e+00 9.54e-01 9e-01 1e+00 0:0.0
3 24 2.876682459926845e+05 1.2e+00 1.02e+00 9e-01 1e+00 0:0.0
100 800 6.809045875281943e-01 1.3e+02 1.41e-02 1e-04 1e-02 0:0.2
200 1600 2.473662150861846e-10 8.0e+02 3.08e-05 1e-08 8e-06 0:0.5
233 1864 2.766344961865341e-14 8.6e+02 7.99e-07 8e-11 7e-08 0:0.6
>>>
>>> cma.pprint(es.result())
(array([ -1.98546755e-09, -1.10214235e-09, 6.43822409e-11,
-1.68621326e-11]),
4.5119610261406537e-16,
1666,
1672,
209,
array([ -9.13545269e-09, -1.45520541e-09, -6.47755631e-11,
-1.00643523e-11]),
array([ 3.20258681e-08, 3.15614974e-09, 2.75282215e-10,
3.27482983e-11]))
>>> assert es.result()[1] < 1e-9
>>> help(es.result)
Help on method result in module cma:
result(self) method of cma.CMAEvolutionStrategy instance
return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``
The optimization loop can also be written explicitly.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1)
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.elli(x) for x in X])
... es.disp()
<output omitted>
achieving the same result as above.
An example with lower bounds (at zero) and handling infeasible
solutions:
>>> import cma
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]})
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = None
... while curr_fit in (None, np.NaN):
... x = es.ask(1)[0]
... curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... es.logger.add()
... es.disp()
<output omitted>
>>>
>>> assert es.result()[1] < 1e-9
>>> assert es.result()[2] < 9000 # by internal termination
>>> # es.logger.plot() # will plot data
>>> # cma.show() # display plot window
An example with user-defined transformation, in this case to realize
a lower bound of 2.
>>> es = cma.CMAEvolutionStrategy(5 * [3], 1,
... {"transformation": [lambda x: x**2+2, None]})
>>> es.optimize(cma.fcts.rosen)
<output omitted>
>>> assert cma.fcts.rosen(es.result()[0]) < 1e-6 + 5.530760944396627e+02
>>> assert es.result()[2] < 3300
The inverse transformation is (only) necessary if the `BoundPenalty`
boundary handler is used at the same time.
The ``CMAEvolutionStrategy`` class also provides a default logger
(cave: files are overwritten when the logger is used with the same
filename prefix):
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
>>> es.logger.disp_header() # to understand the print of disp
Iterat Nfevals function value axis ratio maxstd minstd
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.sphere(x) for x in X])
... es.logger.add() # log current iteration
... es.logger.disp([-1]) # display info for last iteration
1 8 2.72769793021748e+03 1.0e+00 4.05e-01 3.99e-01
2 16 6.58755537926063e+03 1.1e+00 4.00e-01 3.39e-01
<output omitted>
193 1544 3.15195320957214e-15 1.2e+03 3.70e-08 3.45e-11
>>> es.logger.disp_header()
Iterat Nfevals function value axis ratio maxstd minstd
>>> # es.logger.plot() # will make a plot
Example implementing restarts with increasing popsize (IPOP), output
is not displayed:
>>> import cma, numpy as np
>>>
>>> # restart with increasing population size (IPOP)
>>> bestever = cma.BestSolution()
>>> for lam in 10 * 2**np.arange(8): # 10, 20, 40, 80, ..., 10 * 2**7
... es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)', # 9-D
... 5, # initial std sigma0
... {'popsize': lam, # options
... 'verb_append': bestever.evalsall})
... logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... # logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break
<output omitted>
>>> assert es.result()[1] < 1e-8
On the Rastrigin function, usually after five restarts the global
optimum is located.
Using the ``multiprocessing`` module, we can evaluate the function in
parallel with a simple modification of the example (however
multiprocessing does not always seem reliable)::
try:
import multiprocessing as mp
import cma
es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10})
pool = mp.Pool(es.popsize)
while not es.stop():
X = es.ask()
f_values = pool.map_async(cma.felli, X).get()
# use chunksize parameter as es.popsize/len(pool)?
es.tell(X, f_values)
es.disp()
es.logger.add()
except ImportError:
pass
The final example shows how to resume:
>>> import cma, pickle
>>>
>>> es = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.5) # initial std sigma0
>>> es.optimize(cma.fcts.rosen, iterations=100)
>>> pickle.dump(es, open('saved-cma-object.pkl', 'wb'))
>>> print('saved')
>>> del es # let's start fresh
>>>
>>> es = pickle.load(open('saved-cma-object.pkl', 'rb'))
>>> print('resumed')
>>> es.optimize(cma.fcts.rosen, verb_disp=200)
>>> assert es.result()[2] < 15000
>>> cma.pprint(es.result())
Details
=======
The following two enhancements are implemented, the latter is turned
on by default only for very small population size.
*Active CMA* is implemented with option ``CMA_active`` and
conducts an update of the covariance matrix with negative weights.
The negative update is implemented, such that positive definiteness
is guaranteed. The update is applied after the default update and
only before the covariance matrix is decomposed, which limits the
additional computational burden to be at most a factor of three
(typically smaller). A typical speed up factor (number of
f-evaluations) is between 1.1 and two.
References: Jastrebski and Arnold, CEC 2006, Glasmachers et al, GECCO 2010.
*Selective mirroring* is implemented with option ``CMA_mirrors``
in the method ``get_mirror()``. Only the method `ask_and_eval()`
(used by `fmin`) will then sample selectively mirrored vectors. In
selective mirroring, only the worst solutions are mirrored. With
the default small number of mirrors, *pairwise selection* (where at
most one of the two mirrors contribute to the update of the
distribution mean) is implicitly guaranteed under selective
mirroring and therefore not explicitly implemented.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
:See: `fmin()`, `OOOptimizer`, `CMAOptions`, `plot()`, `ask()`,
`tell()`, `ask_and_eval()`
"""
@property # read only attribute decorator for a method
def popsize(self):
"""number of samples by default returned by` ask()`
"""
return self.sp.popsize
# this is not compatible with python2.5:
# @popsize.setter
# def popsize(self, p):
# """popsize cannot be set (this might change in future)
# """
# raise _Error("popsize cannot be changed")
def stop(self, check=True):
"""return a dictionary with the termination status.
With ``check==False``, the termination conditions are not checked
and the status might not reflect the current situation.
"""
if (check and self.countiter > 0 and self.opts['termination_callback'] and
self.opts['termination_callback'] != str(self.opts['termination_callback'])):
self.callbackstop = self.opts['termination_callback'](self)
return self._stopdict(self, check) # update the stopdict and return a Dict
def copy_constructor(self, es):
raise NotImplementedError("")
def __init__(self, x0, sigma0, inopts={}):
"""see class `CMAEvolutionStrategy`
"""
if isinstance(x0, CMAEvolutionStrategy):
self.copy_constructor(x0)
return
self.inputargs = dict(locals()) # for the record
del self.inputargs['self'] # otherwise the instance self has a cyclic reference
self.inopts = inopts
opts = CMAOptions(inopts).complement() # CMAOptions() == fmin([],[]) == defaultOptions()
global_verbosity = opts.eval('verbose')
if global_verbosity < -8:
opts['verb_disp'] = 0
opts['verb_log'] = 0
opts['verb_plot'] = 0
if 'noise_handling' in opts and opts.eval('noise_handling'):
raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
if 'restarts' in opts and opts.eval('restarts'):
raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
self._set_x0(x0) # manage weird shapes, set self.x0
self.N_pheno = len(self.x0)
self.sigma0 = sigma0
if isinstance(sigma0, basestring):
# TODO: no real need here (do rather in fmin)
self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2'
if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
raise _Error('input argument sigma0 must be (or evaluate to) a scalar')
self.sigma = self.sigma0 # goes to initialize
# extract/expand options
N = self.N_pheno
assert isinstance(opts['fixed_variables'], (basestring, dict)) \
or opts['fixed_variables'] is None
# TODO: in case of a string we need to eval the fixed_variables
if isinstance(opts['fixed_variables'], dict):
N = self.N_pheno - len(opts['fixed_variables'])
opts.evalall(locals()) # using only N
self.opts = opts
self.randn = opts['randn']
self.gp = GenoPheno(self.N_pheno, opts['scaling_of_variables'], opts['typical_x'],
opts['fixed_variables'], opts['transformation'])
self.boundary_handler = opts.eval('boundary_handling')(opts.eval('bounds'))
if not self.boundary_handler.has_bounds():
self.boundary_handler = BoundNone() # just a little faster and well defined
elif not self.boundary_handler.is_in_bounds(self.x0):
if opts['verbose'] >= 0:
_print_warning('initial solution is out of the domain boundaries:')
print(' x0 = ' + str(self.gp.pheno(self.x0)))
print(' ldom = ' + str(self.boundary_handler.bounds[0]))
print(' udom = ' + str(self.boundary_handler.bounds[1]))
# set self.mean to geno(x0)
tf_geno_backup = self.gp.tf_geno
if self.gp.tf_pheno and self.gp.tf_geno is None:
self.gp.tf_geno = lambda x: x # a hack to avoid an exception
_print_warning("""
computed initial point is likely to be wrong, because
no inverse of the user-provided phenotype
transformation was found""")
self.mean = self.gp.geno(self.x0,
from_bounds=self.boundary_handler.inverse,
copy_always=True)
self.gp.tf_geno = tf_geno_backup
# without copy_always interface:
# self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False)
self.N = len(self.mean)
assert N == self.N
self.fmean = np.NaN # TODO name should change? prints nan in output files (OK with matlab&octave)
self.fmean_noise_free = 0. # for output only
self.adapt_sigma = opts['AdaptSigma']
if self.adapt_sigma is False:
self.adapt_sigma = CMAAdaptSigmaNone
self.adapt_sigma = self.adapt_sigma() # class instance
self.sp = _CMAParameters(N, opts)
self.sp0 = self.sp # looks useless, as it is not a copy
# initialization of state variables
self.countiter = 0
self.countevals = max((0, opts['verb_append'])) \
if not isinstance(opts['verb_append'], bool) else 0
self.pc = np.zeros(N)
self.pc_neg = np.zeros(N)
def eval_scaling_vector(in_):
res = 1
if np.all(in_):
res = array(in_, dtype=float)
if np.size(res) not in (1, N):
raise ValueError("""CMA_stds option must have dimension %d
instead of %d""" %
(N, np.size(res)))
return res
self.sigma_vec = eval_scaling_vector(self.opts['CMA_stds'])
if isfinite(self.opts['CMA_dampsvec_fac']):
self.sigma_vec *= np.ones(N) # make sure to get a vector
self.sigma_vec0 = self.sigma_vec if isscalar(self.sigma_vec) \
else self.sigma_vec.copy()
stds = eval_scaling_vector(self.opts['CMA_teststds'])
if self.opts['CMA_diagonal']: # is True or > 0
# linear time and space complexity
self.B = array(1) # fine for np.dot(self.B, .) and self.B.T
self.C = stds**2 * np.ones(N) # in case stds == 1
self.dC = self.C
else:
self.B = np.eye(N) # identity(N)
# prevent equal eigenvals, a hack for np.linalg:
# self.C = np.diag(stds**2 * exp(1e-4 * np.random.rand(N)))
self.C = np.diag(stds**2 * exp((1e-4 / N) * np.arange(N)))
self.dC = np.diag(self.C).copy()
self._Yneg = np.zeros((N, N))
self.D = self.dC**0.5 # we assume that C is diagonal
# self.gp.pheno adds fixed variables
relative_stds = ((self.gp.pheno(self.mean + self.sigma * self.sigma_vec * self.D)
- self.gp.pheno(self.mean - self.sigma * self.sigma_vec * self.D)) / 2.0
/ (self.boundary_handler.get_bounds('upper', self.N_pheno)
- self.boundary_handler.get_bounds('lower', self.N_pheno)))
if np.any(relative_stds > 1):
raise ValueError('initial standard deviations larger than the bounded domain size in variables '
+ str(np.where(relative_stds > 1)[0]))
self._flgtelldone = True
self.itereigenupdated = self.countiter
self.count_eigen = 0
self.noiseS = 0 # noise "signal"
self.hsiglist = []
if opts['seed'] is not None:
opts['seed'] = int(opts['seed'])
np.random.seed(opts['seed']) # CAVEAT: this only seeds np.random
self.sent_solutions = CMASolutionDict()
self.archive = CMASolutionDict()
self.best = BestSolution()
self.const = _BlancClass()
self.const.chiN = N**0.5 * (1 - 1. / (4.*N) + 1. / (21.*N**2)) # expectation of norm(randn(N,1))
self.logger = CMADataLogger(opts['verb_filenameprefix'], modulo=opts['verb_log']).register(self)
# attribute for stopping criteria in function stop
self._stopdict = _CMAStopDict()
self.callbackstop = 0
self.fit = _BlancClass()
self.fit.fit = [] # not really necessary
self.fit.hist = [] # short history of best
self.fit.histbest = [] # long history of best
self.fit.histmedian = [] # long history of median
self.more_to_write = [] # [1, 1, 1, 1] # N*[1] # needed when writing takes place before setting
# say hello
if opts['verb_disp'] > 0 and opts['verbose'] >= 0:
sweighted = '_w' if self.sp.mu > 1 else ''
smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr +
')-' + ('a' if opts['CMA_active'] else '') + 'CMA-ES' +
' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100 * self.sp.weights[0])) +
' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime())) # + func.__name__
if opts['CMA_diagonal'] and self.sp.CMA_on:
s = ''
if opts['CMA_diagonal'] is not True:
s = ' for '
if opts['CMA_diagonal'] < np.inf:
s += str(int(opts['CMA_diagonal']))
else:
s += str(np.floor(opts['CMA_diagonal']))
s += ' iterations'
s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')'
print(' Covariance matrix is diagonal' + s)
def _set_x0(self, x0):
if x0 == str(x0):
x0 = eval(x0)
self.x0 = array(x0) # should not have column or row, is just 1-D
if self.x0.ndim == 2:
if self.opts.eval('verbose') >= 0:
_print_warning('input x0 should be a list or 1-D array, trying to flatten ' +
str(self.x0.shape) + '-array')
if self.x0.shape[0] == 1:
self.x0 = self.x0[0]
elif self.x0.shape[1] == 1:
self.x0 = array([x[0] for x in self.x0])
# if self.x0.ndim != 1:
# raise _Error('x0 must be 1-D array')
# if len(self.x0) <= 1:
# raise _Error('optimization in 1-D is not supported (code was never tested)')
self.x0.resize(self.x0.shape[0]) # 1-D array, not really necessary?!
# ____________________________________________________________
# ____________________________________________________________
def ask(self, number=None, xmean=None, sigma_fac=1,
gradf=None, args=()):
"""get new candidate solutions, sampled from a multi-variate
normal distribution and transformed to f-representation
(phenotype) to be evaluated.
Arguments
---------
`number`
number of returned solutions, by default the
population size ``popsize`` (AKA ``lambda``).
`xmean`
distribution mean, phenotype?
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`gradf`
gradient, ``len(gradf(x)) == len(x)``, if
``gradf is not None`` the third solution in the
returned list is "sampled" in supposedly Newton
direction ``dot(C, gradf(xmean, *args))``.
`args`
additional arguments passed to gradf
Return
------
A list of N-dimensional candidate solutions to be evaluated
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3)
>>> while not es.stop() and es.best.f > 1e-6: # my_desired_target_f_value
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rosen(x) for x in X] # call function rosen with each solution
... es.tell(X, fit) # feed values
:See: `ask_and_eval`, `ask_geno`, `tell`
"""
pop_geno = self.ask_geno(number, xmean, sigma_fac)
# N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
# new data: 11.5s vs 9.5s == 20%
# TODO: check here, whether this is necessary?
# return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop] # probably fine
# return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved
pop_pheno = [self.gp.pheno(x, copy=True, into_bounds=self.boundary_handler.repair) for x in pop_geno]
if gradf is not None:
# see Hansen (2011), Injecting external solutions into CMA-ES
if not self.gp.islinear:
_print_warning("""
using the gradient (option ``gradf``) with a non-linear
coordinate-wise transformation (option ``transformation``)
has never been tested.""")
# TODO: check this out
def grad_numerical_of_coordinate_map(x, map, epsilon=None):
"""map is a coordinate-wise independent map, return
the estimated diagonal of the Jacobian.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
return (map(x + eps) - map(x - eps)) / (2 * eps)
def grad_numerical_sym(x, func, epsilon=None):
"""return symmetric numerical gradient of func : R^n -> R.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
try:
if self.last_iteration_with_gradient == self.countiter:
_print_warning('gradient is used several times in ' +
'this iteration', iteration=self.countiter)
self.last_iteration_with_gradient = self.countiter
except AttributeError:
pass
index_for_gradient = min((2, len(pop_pheno)-1))
xmean = self.mean if xmean is None else xmean
xpheno = self.gp.pheno(xmean, copy=True,
into_bounds=self.boundary_handler.repair)
grad_at_mean = gradf(xpheno, *args)
# lift gradient into geno-space
if not self.gp.isidentity or (self.boundary_handler is not None
and self.boundary_handler.has_bounds()):
boundary_repair = None
gradpen = 0
if isinstance(self.boundary_handler, BoundTransform):
boundary_repair = self.boundary_handler.repair
elif isinstance(self.boundary_handler, BoundPenalty):
fpenalty = lambda x: self.boundary_handler.__call__(
x, SolutionDict({tuple(x): {'geno': x}}), self.gp)
gradpen = grad_numerical_sym(
xmean, fpenalty)
elif self.boundary_handler is None or \
isinstance(self.boundary_handler, BoundNone):
pass
else:
raise NotImplementedError(
"unknown boundary handling method" +
str(self.boundary_handler) +
" when using gradf")
gradgp = grad_numerical_of_coordinate_map(
xmean,
lambda x: self.gp.pheno(x, copy=True,
into_bounds=boundary_repair))
grad_at_mean = grad_at_mean * gradgp + gradpen
# TODO: frozen variables break the code (e.g. at grad of map)
if len(grad_at_mean) != self.N and self.opts['fixed_variables']:
NotImplementedError("""
gradient with fixed variables is not yet implemented""")
v = self.D * dot(self.B.T, self.sigma_vec * grad_at_mean)
# newton_direction = sv * B * D * D * B^T * sv * gradient = sv * B * D * v
# v = D^-1 * B^T * sv^-1 * newton_direction = D * B^T * sv * gradient
q = sum(v**2)
if q:
# Newton direction
pop_geno[index_for_gradient] = xmean - self.sigma \
* (self.N / q)**0.5 \
* (self.sigma_vec * dot(self.B, self.D * v))
else:
pop_geno[index_for_gradient] = xmean
_print_warning('gradient zero observed',
iteration=self.countiter)
pop_pheno[index_for_gradient] = self.gp.pheno(
pop_geno[index_for_gradient], copy=True,
into_bounds=self.boundary_handler.repair)
# insert solutions, this could also (better?) be done in self.gp.pheno
for i in rglen((pop_geno)):
self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i], iteration=self.countiter)
return pop_pheno
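# Illustrative usage sketch (added, not part of the original source):
# calling `ask` with a gradient function `gradf` injects one Newton-like
# sample into the returned population. The quadratic objective and its
# gradient below are assumptions made only for this example; any callable
# with ``len(gradf(x)) == len(x)`` can be used.
# >>> import cma, numpy as np
# >>> es = cma.CMAEvolutionStrategy(8 * [1], 0.5, {'verbose': -9})
# >>> X = es.ask(gradf=lambda x: 2 * np.asarray(x)) # gradient of sum(x_i**2)
# >>> es.tell(X, [sum(np.asarray(x)**2) for x in X])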
# ____________________________________________________________
# ____________________________________________________________
def ask_geno(self, number=None, xmean=None, sigma_fac=1):
"""get new candidate solutions in genotyp, sampled from a
multi-variate normal distribution.
Arguments are
`number`
number of returned solutions, by default the
population size `popsize` (AKA lambda).
`xmean`
distribution mean
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`ask_geno` returns a list of N-dimensional candidate solutions
in genotype representation and is called by `ask`.
Details: updates the sample distribution and might change
the geno-pheno transformation during this update.
:See: `ask`, `ask_and_eval`
"""
if number is None or number < 1:
number = self.sp.popsize
# update distribution, might change self.mean
if self.sp.CMA_on and (
(self.opts['updatecovwait'] is None and
self.countiter >=
self.itereigenupdated + 1. / (self.sp.c1 + self.sp.cmu) / self.N / 10
) or
(self.opts['updatecovwait'] is not None and
self.countiter > self.itereigenupdated + self.opts['updatecovwait']
) or
(self.sp.neg.cmuexp * (self.countiter - self.itereigenupdated) > 0.5
) # TODO (minor): not sure whether this is "the right" criterion
):
self.updateBD()
if xmean is None:
xmean = self.mean
else:
try:
xmean = self.archive[xmean]['geno']
# noise handling after call of tell
except KeyError:
try:
xmean = self.sent_solutions[xmean]['geno']
# noise handling before calling tell
except KeyError:
pass
if self.countiter == 0:
self.tic = time.clock() # backward compatible
self.elapsed_time = ElapsedTime()
sigma = sigma_fac * self.sigma
# update parameters for sampling the distribution
# fac 0 1 10
# 150-D cigar:
# 50749 50464 50787
# 200-D elli: == 6.9
# 99900 101160
# 100995 103275 == 2% loss
# 100-D elli: == 6.9
# 363052 369325 < 2% loss
# 365075 365755
# sample distribution
if self._flgtelldone: # could be done in tell()!?
self._flgtelldone = False
self.ary = []
# check injections from pop_injection_directions
arinj = []
if hasattr(self, 'pop_injection_directions'):
if self.countiter < 4 and \
len(self.pop_injection_directions) > self.popsize - 2:
_print_warning(' %d special injected samples with popsize %d, '
% (len(self.pop_injection_directions), self.popsize)
+ "popsize %d will be used" % (len(self.pop_injection_directions) + 2)
+ (" and the warning is suppressed in the following" if self.countiter == 3 else ""))
while self.pop_injection_directions:
y = self.pop_injection_directions.pop(0)
if self.opts['CMA_sample_on_sphere_surface']:
y *= (self.N**0.5 if self.opts['CSA_squared'] else
self.const.chiN) / self.mahalanobis_norm(y)
arinj.append(y)
else:
y *= self.random_rescaling_factor_to_mahalanobis_size(y) / self.sigma
arinj.append(y)
# each row is a solution
# the 1 is a small safeguard which needs to be removed to implement "pure" adaptive encoding
arz = self.randn((max([1, (number - len(arinj))]), self.N))
if self.opts['CMA_sample_on_sphere_surface']: # normalize the length to chiN
for i in rglen((arz)):
ss = sum(arz[i]**2)
if 1 < 3 or ss > self.N + 10.1:
arz[i] *= (self.N**0.5 if self.opts['CSA_squared']
else self.const.chiN) / ss**0.5
# or to average
# arz *= 1 * self.const.chiN / np.mean([sum(z**2)**0.5 for z in arz])
# fac = np.mean(sum(arz**2, 1)**0.5)
# print fac
# arz *= self.const.chiN / fac
# compute ary from arz
if len(arz): # should always be true
# apply unconditional mirroring, is pretty obsolete
if new_injections and self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 0:
for i in xrange(self.sp.lam_mirr):
if 2 * (i + 1) > len(arz):
if self.countiter < 4:
_print_warning("fewer mirrors generated than given in parameter setting (%d<%d)"
% (i, self.sp.lam_mirr))
break
arz[-1 - 2 * i] = -arz[-2 - 2 * i]
ary = self.sigma_vec * np.dot(self.B, (self.D * arz).T).T
if len(arinj):
ary = np.vstack((arinj, ary))
else:
ary = array(arinj)
# TODO: subject to removal in future
if not new_injections and number > 2 and self.countiter > 2:
if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
self.opts['mean_shift_line_samples'] or
self.opts['pc_line_samples']):
ys = []
if self.opts['pc_line_samples']:
ys.append(self.pc[:]) # now TPA is with pc_line_samples
if self.opts['mean_shift_line_samples']:
ys.append(self.mean - self.mean_old)
if not len(ys):
ys.append(self.mean - self.mean_old)
# assign a mirrored pair from each element of ys into ary
for i, y in enumerate(ys):
if len(arz) > 2 * i + 1: # at least two more samples
assert y is not self.pc
# y *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(y)
y *= self.random_rescaling_factor_to_mahalanobis_size(y)
# TODO: rescale y depending on some parameter?
ary[2*i] = y / self.sigma
ary[2*i + 1] = y / -self.sigma
else:
_print_warning('line samples omitted due to small popsize',
method_name='ask_geno', iteration=self.countiter)
# print(xmean[0])
pop = xmean + sigma * ary
self.evaluations_per_f_value = 1
self.ary = ary
return pop
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x -= self.mean
if any(x):
x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x
def random_rescaling_factor_to_mahalanobis_size(self, y):
"""``self.mean + self.random_rescaling_factor_to_mahalanobis_size(y)``
is guarantied to appear like from the sample distribution.
"""
if len(y) != self.N:
raise ValueError('len(y)=%d != %d=dimension' % (len(y), self.N))
if not any(y):
_print_warning("input was all-zeros, which is probably a bug",
"random_rescaling_factor_to_mahalanobis_size",
iteration=self.countiter)
return 1.0
return sum(self.randn(len(y))**2)**0.5 / self.mahalanobis_norm(y)
def get_mirror(self, x, preserve_length=False):
"""return ``pheno(self.mean - (geno(x) - self.mean))``.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(cma.np.random.randn(3), 1)
>>> x = cma.np.random.randn(3)
>>> assert cma.Mh.vequals_approximately(es.mean - (x - es.mean), es.get_mirror(x, preserve_length=True))
>>> x = es.ask(1)[0]
>>> vals = (es.get_mirror(x) - es.mean) / (x - es.mean)
>>> assert cma.Mh.equals_approximately(sum(vals), len(vals) * vals[0])
TODO: this implementation is yet experimental.
TODO: this implementation includes geno-pheno transformation,
however in general GP-transformation should be separated from
specific code.
Selectively mirrored sampling improves performance to a moderate extent,
but over-additively with active CMA, for quite understandable reasons.
The optimal number of mirrors is surprisingly small: 1, 2, 3 for
maxlam=7,13,20, where 3, 6, 10 are the respective maximal possible
numbers of mirrors, which must hence be clearly suboptimal.
"""
try:
dx = self.sent_solutions[x]['geno'] - self.mean
except: # can only happen with injected solutions?!
dx = self.gp.geno(x, from_bounds=self.boundary_handler.inverse,
copy_if_changed=True) - self.mean
if not preserve_length:
# dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(dx)
dx *= self.random_rescaling_factor_to_mahalanobis_size(dx)
x = self.mean - dx
y = self.gp.pheno(x, into_bounds=self.boundary_handler.repair)
# old measure: costs 25% in CPU performance with N,lambda=20,200
self.sent_solutions.insert(y, geno=x, iteration=self.countiter)
return y
def _mirror_penalized(self, f_values, idx):
"""obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``.
"""
assert len(f_values) >= 2 * len(idx)
m = np.max(np.abs(f_values))
for i in xrange(len(idx)):
if f_values[idx[i]] > f_values[-1 - i]:
f_values[idx[i]] += m
else:
f_values[-1 - i] += m
return f_values
def _mirror_idx_cov(self, f_values, idx1): # will most likely be removed
"""obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems to be what is desirable. If the mirror is bad,
a strong negative update is made, which again is what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
"""
idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
f = []
for i in rglen((idx1)):
f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
# idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
return idx2[np.argsort(f)][-1::-1]
def eval_mean(self, func, args=()):
"""evaluate the distribution mean, this is not (yet) effective
in terms of termination or display"""
self.fmean = func(self.mean, *args)
return self.fmean
# ____________________________________________________________
# ____________________________________________________________
#
def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1,
evaluations=1, aggregation=np.median, kappa=1):
"""samples `number` solutions and evaluates them on `func`, where
each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``.
Arguments
---------
`func`
objective function, ``func(x)`` returns a scalar
`args`
additional parameters for `func`
`gradf`
gradient of objective function, ``g = gradf(x, *args)``
must satisfy ``len(g) == len(x)``
`number`
number of solutions to be sampled, by default
population size ``popsize`` (AKA lambda)
`xmean`
mean for sampling the solutions, by default ``self.mean``.
`sigma_fac`
multiplier for sampling width, standard deviation, for example
to get a small perturbation of solution `xmean`
`evaluations`
number of evaluations for each sampled solution
`aggregation`
function that aggregates `evaluations` values to
a single value.
`kappa`
multiplier used for the evaluation of the solutions, in
that ``func(m + kappa*(x - m))`` is the f-value for x.
Return
------
``(X, fit)``, where
X -- list of solutions
fit -- list of respective function values
Details
-------
While ``not self.is_feasible(x, func(x))`` new solutions are sampled. By
default ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``.
The argument to `func` can be freely modified within `func`.
Depending on the ``CMA_mirrors`` option, some solutions are not sampled
independently but as mirrors of other bad solutions. This is a simple
derandomization that can save 10-30% of the evaluations in particular
with small populations, for example on the cigar function.
Example
-------
>>> import cma
>>> x0, sigma0 = 8*[10], 1 # 8-D
>>> es = cma.CMAEvolutionStrategy(x0, sigma0)
>>> while not es.stop():
... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling
... es.tell(X, fit) # pass on fitness values
... es.disp(20) # print every 20-th iteration
>>> print('terminated on ' + str(es.stop()))
<output omitted>
A single iteration step can be expressed in one line, such that
an entire optimization after initialization becomes
::
while not es.stop():
es.tell(*es.ask_and_eval(cma.fcts.elli))
"""
# initialize
popsize = self.sp.popsize
if number is not None:
popsize = number
selective_mirroring = self.opts['CMA_mirrormethod'] > 0
nmirrors = self.sp.lam_mirr
if popsize != self.sp.popsize:
nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
# TODO: now selective mirroring might be impaired
assert new_injections or self.opts['CMA_mirrormethod'] < 2
if new_injections and self.opts['CMA_mirrormethod'] != 1: # otherwise mirrors are done elsewhere
nmirrors = 0
assert nmirrors <= popsize // 2
self.mirrors_idx = np.arange(nmirrors) # might never be used
self.mirrors_rejected_idx = [] # might never be used
is_feasible = self.opts['is_feasible']
# do the work
fit = [] # or np.NaN * np.empty(number)
X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args)
if xmean is None:
xmean = self.mean # might have changed in self.ask
X = []
for k in xrange(int(popsize)):
x, f = X_first.pop(0), None
rejected = -1
while rejected < 0 or not is_feasible(x, f): # rejection sampling
rejected += 1
if rejected: # resample
x = self.ask(1, xmean, sigma_fac)[0]
elif k >= popsize - nmirrors: # mirrored sample
if k == popsize - nmirrors and selective_mirroring:
self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
if rejected == 1 and k >= popsize - nmirrors:
self.mirrors_rejected_idx.append(k)
# constraints handling test hardwired ccccccccccc
length_normalizer = 1
# zzzzzzzzzzzzzzzzzzzzzzzzz
f = func(x, *args) if kappa == 1 else \
func(xmean + kappa * length_normalizer * (x - xmean),
*args)
if is_feasible(x, f) and evaluations > 1:
f = aggregation([f] + [(func(x, *args) if kappa == 1 else
func(xmean + kappa * length_normalizer * (x - xmean), *args))
for _i in xrange(int(evaluations - 1))])
if (rejected + 1) % 1000 == 0:
print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
(rejected, self.countiter))
fit.append(f)
X.append(x)
self.evaluations_per_f_value = int(evaluations)
return X, fit
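# Illustrative usage sketch (added, not part of the original source):
# `ask_and_eval` on a noisy objective, where each solution is evaluated
# `evaluations` times and the values are reduced with `aggregation`.
# The noisy objective below is an assumption made only for this example.
# >>> import cma, numpy as np
# >>> noisy = lambda x: sum(np.asarray(x)**2) + 0.01 * np.random.randn()
# >>> es = cma.CMAEvolutionStrategy(5 * [1], 0.3, {'verbose': -9})
# >>> X, fit = es.ask_and_eval(noisy, evaluations=3, aggregation=np.median)
# >>> es.tell(X, fit)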
def prepare_injection_directions(self):
"""provide genotypic directions for TPA and selective mirroring,
with no specific length normalization, to be used in the
coming iteration.
Details:
This method is called in the end of `tell`. The result is
assigned to ``self.pop_injection_directions`` and used in
`ask_geno`.
TODO: should be rather appended?
"""
# self.pop_injection_directions is supposed to be empty here
if hasattr(self, 'pop_injection_directions') and self.pop_injection_directions:
ValueError("Looks like a bug in calling order/logics")
ary = []
if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
self.opts['mean_shift_line_samples']):
ary.append(self.mean - self.mean_old)
ary.append(self.mean_old - self.mean) # another copy!
if ary[-1][0] == 0.0:
_print_warning('zero mean shift encountered which ',
'prepare_injection_directions',
'CMAEvolutionStrategy', self.countiter)
if self.opts['pc_line_samples']: # caveat: before, two samples were used
ary.append(self.pc.copy())
if self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 2:
if self.pop_sorted is None:
_print_warning('pop_sorted attribute not found, mirrors omitted',
'prepare_injection_directions',
iteration=self.countiter)
else:
ary += self.get_selective_mirrors()
self.pop_injection_directions = ary
return ary
def get_selective_mirrors(self, number=None, pop_sorted=None):
"""get mirror genotypic directions of the `number` worst
solution, based on ``pop_sorted`` attribute (from last
iteration).
Details:
Takes the last ``number=sp.lam_mirr`` entries in
``pop_sorted=self.pop_sorted`` as solutions to be mirrored.
"""
if pop_sorted is None:
if hasattr(self, 'pop_sorted'):
pop_sorted = self.pop_sorted
else:
return None
if number is None:
number = self.sp.lam_mirr
res = []
for i in xrange(1, number + 1):
res.append(self.mean_old - pop_sorted[-i])
return res
# ____________________________________________________________
def tell(self, solutions, function_values, check_points=None,
copy=False):
"""pass objective function values to prepare for next
iteration. This core procedure of the CMA-ES algorithm updates
all state variables, in particular the two evolution paths, the
distribution mean, the covariance matrix and a step-size.
Arguments
---------
`solutions`
list or array of candidate solution points (of
type `numpy.ndarray`), most presumably delivered before
by method `ask()` or `ask_and_eval()`.
`function_values`
list or array of objective function values
corresponding to the respective points. Apart from termination
decisions, only the ranking of values in `function_values`
is used.
`check_points`
If ``check_points is None``, only solutions that are not generated
by `ask()` are possibly clipped (recommended). ``False`` does not clip
any solution (not recommended).
If ``True``, clips solutions that realize long steps (i.e. also
those that are unlikely to be generated with `ask()`). `check_points`
can be a list of indices to be checked in solutions.
`copy`
``solutions`` can be modified in this routine, if ``copy is False``
Details
-------
`tell()` updates the parameters of the multivariate
normal search distribution, namely covariance matrix and
step-size and updates also the attributes ``countiter`` and
``countevals``. To check the points for consistency is quadratic
in the dimension (like sampling points).
Bugs
----
The effect of changing the solutions delivered by `ask()`
depends on whether boundary handling is applied. With boundary
handling, modifications are disregarded. This is necessary to
apply the default boundary handling that uses unrepaired
solutions but might change in future.
Example
-------
::
import cma
func = cma.fcts.elli # choose objective function
es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1)
while not es.stop():
X = es.ask()
es.tell(X, [func(x) for x in X])
es.result() # where the result can be found
:See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()`
"""
if self._flgtelldone:
raise _Error('tell should only be called once per iteration')
lam = len(solutions)
if lam != array(function_values).shape[0]:
raise _Error('for each candidate solution '
+ 'a function value must be provided')
if lam + self.sp.lam_mirr < 3:
raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5')
if not isscalar(function_values[0]):
if isscalar(function_values[0][0]):
if self.countiter <= 1:
_print_warning('function values are not a list of scalars (further warnings are suppressed)')
function_values = [val[0] for val in function_values]
else:
raise _Error('objective function values must be a list of scalars')
# ## prepare
N = self.N
sp = self.sp
if lam < sp.mu: # rather decrease cmean instead of having mu > lambda//2
raise _Error('not enough solutions passed to function tell (mu>lambda)')
self.countiter += 1 # >= 1 now
self.countevals += sp.popsize * self.evaluations_per_f_value
self.best.update(solutions, self.sent_solutions, function_values, self.countevals)
flg_diagonal = self.opts['CMA_diagonal'] is True \
or self.countiter <= self.opts['CMA_diagonal']
if not flg_diagonal and len(self.C.shape) == 1: # C was diagonal ie 1-D
# enter non-separable phase (no easy return from here)
self.C = np.diag(self.C)
if 1 < 3:
self.B = np.eye(N) # identity(N)
idx = np.argsort(self.D)
self.D = self.D[idx]
self.B = self.B[:, idx]
self._Yneg = np.zeros((N, N))
# ## manage fitness
fit = self.fit # make short cut
# CPU for N,lam=20,200: this takes 10s vs 7s
fit.bndpen = self.boundary_handler.update(function_values, self)(solutions, self.sent_solutions, self.gp)
# for testing:
# fit.bndpen = self.boundary_handler.update(function_values, self)([s.unrepaired for s in solutions])
fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
fit.fit = array(function_values, copy=False)[fit.idx]
# update output data TODO: this is obsolete!? However: need communicate current best x-value?
# old: out['recent_x'] = self.gp.pheno(pop[0])
# self.out['recent_x'] = array(solutions[fit.idx[0]]) # TODO: change in a data structure(?) and use current as identify
# self.out['recent_f'] = fit.fit[0]
# fitness histories
fit.hist.insert(0, fit.fit[0])
# if len(self.fit.histbest) < 120+30*N/sp.popsize or # does not help, as tablet in the beginning is the critical counter-case
if ((self.countiter % 5) == 0): # 20 percent of 1e5 gen.
fit.histbest.insert(0, fit.fit[0])
fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21
else fit.fit[self.popsize // 2])
if len(fit.histbest) > 2e4: # 10 + 30*N/sp.popsize:
fit.histbest.pop()
fit.histmedian.pop()
if len(fit.hist) > 10 + 30 * N / sp.popsize:
fit.hist.pop()
# TODO: clean up inconsistency when an unrepaired solution is available and used
# now get the genotypes
pop = self.pop_sorted = [] # create pop from input argument solutions
for k, s in enumerate(solutions): # use phenotype before Solution.repair()
if 1 < 3:
pop += [self.gp.geno(s,
from_bounds=self.boundary_handler.inverse,
repair=(self.repair_genotype if check_points not in (False, 0, [], ()) else None),
archive=self.sent_solutions)] # takes genotype from sent_solutions, if available
try:
self.archive.insert(s, value=self.sent_solutions.pop(s), fitness=function_values[k])
# self.sent_solutions.pop(s)
except KeyError:
pass
# check that TPA mirrors are available, TODO: move to TPA class?
if isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) and self.countiter > 3 and not (self.countiter % 3):
dm = self.mean[0] - self.mean_old[0]
dx0 = pop[0][0] - self.mean_old[0]
dx1 = pop[1][0] - self.mean_old[0]
for i in np.random.randint(1, self.N, 1):
try:
if not Mh.equals_approximately(
(self.mean[i] - self.mean_old[i])
/ (pop[0][i] - self.mean_old[i]),
dm / dx0, 1e-8) or \
not Mh.equals_approximately(
(self.mean[i] - self.mean_old[i])
/ (pop[1][i] - self.mean_old[i]),
dm / dx1, 1e-8):
_print_warning('TPA error with mirrored samples', 'tell',
'CMAEvolutionStrategy', self.countiter)
except ZeroDivisionError:
_print_warning('zero division encountered in TPA check\n which should be very rare and is likely a bug',
'tell', 'CMAEvolutionStrategy', self.countiter)
try:
moldold = self.mean_old
except:
pass
self.mean_old = self.mean
mold = self.mean_old # just an alias
# check and normalize each x - m
# check_points is a flag (None is default: check non-known solutions) or an index list
# should also a number possible (first check_points points)?
if check_points not in (None, False, 0, [], ()): # useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions
try:
if len(check_points):
idx = check_points
except:
idx = xrange(sp.popsize)
for k in idx:
self.repair_genotype(pop[k])
# only arrays can be multiple indexed
pop = array(pop, copy=False)
# sort pop
pop = pop[fit.idx]
# prepend best-ever solution to population, in case
if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
if self.best.x_geno is not None:
xp = [self.best.x_geno]
# xp = [self.best.xdict['geno']]
# xp = [self.gp.geno(self.best.x[:])] # TODO: remove
# print self.mahalanobis_norm(xp[0]-self.mean)
else:
xp = [self.gp.geno(array(self.best.x, copy=True),
self.boundary_handler.inverse,
copy_if_changed=False)]
print('genotype for elitist not found')
self.clip_or_fit_solutions(xp, [0])
pop = array([xp[0]] + list(pop))
elif self.opts['CMA_elitist'] == 'initial': # current solution was better
self.opts['CMA_elitist'] = False
self.pop_sorted = pop
# compute new mean
self.mean = mold + self.sp.cmean * \
(sum(sp.weights * pop[0:sp.mu].T, 1) - mold)
# check Delta m (this is not default, but could become at some point)
# CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
# replaced by repair_geno?
# simple test case injecting self.mean:
# self.mean = 1e-4 * self.sigma * np.random.randn(N)
if 1 < 3:
cmean = self.sp.cmean
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
# get learning rate constants
cc, c1, cmu = sp.cc, sp.c1, sp.cmu
if flg_diagonal:
cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep
# now the real work can start
hsig = self.adapt_sigma.hsig(self) # ps update must be done here in separable case
# hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
# adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
# hsig leads to premature convergence of C otherwise
# hsiga = (1-hsig**2) * c1 * cc * (2-cc) # to be removed in future
c1a = c1 - (1 - hsig**2) * c1 * cc * (2 - cc) # adjust for variance loss
self.pc = (1 - cc) * self.pc + \
hsig * (sqrt(cc * (2 - cc) * sp.mueff) / self.sigma / cmean) * \
(self.mean - mold) / self.sigma_vec
# covariance matrix adaptation/update
if sp.CMA_on:
# assert sp.c1 + sp.cmu < sp.mueff / N # ??
assert c1 + cmu <= 1
# default full matrix case
if not flg_diagonal:
Y = (pop[0:sp.mu] - mold) / (self.sigma * self.sigma_vec)
Y = dot((cmu * sp.weights) * Y.T, Y) # learning rate integrated
if self.sp.neg.cmuexp:
tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec)
# normalize to constant length (seems preferable in several aspects)
for i in xrange(tmp.shape[0]):
tmp[i, :] *= N**0.5 / self.mahalanobis_norm(
self.sigma_vec * tmp[i, :]) / self.sigma
self._Yneg *= 1 - self.sp.neg.cmuexp # for some reason necessary?
self._Yneg += dot(sp.neg.weights * tmp.T, tmp) - self.C
# self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1*self.sp.neg.cmuexp)
self.C *= 1 - c1a - cmu
self.C += np.outer(c1 * self.pc, self.pc) + Y
self.dC[:] = np.diag(self.C) # for output and termination checking
else: # separable/diagonal linear case
assert(c1 + cmu <= 1)
Z = np.zeros(N)
for k in xrange(sp.mu):
z = (pop[k] - mold) / (self.sigma * self.sigma_vec) # TODO see above
Z += sp.weights[k] * z * z # is 1-D
self.C = (1 - c1a - cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
# TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2)
self.dC = self.C
self.D = sqrt(self.C) # C is a 1-D array, this is why adapt_sigma needs to prepare before
self.itereigenupdated = self.countiter
# idx = self._mirror_idx_cov() # take half of mirrored vectors for negative update
# step-size adaptation, adapt sigma
# in case of TPA, function_values[0] and [1] must reflect samples collinear to xmean - xmean_old
self.adapt_sigma.update(self, function_values=function_values)
if self.sigma * min(self.sigma_vec * self.dC**0.5) < self.opts['minstd']:
self.sigma = self.opts['minstd'] / min(self.sigma_vec * self.dC**0.5)
if self.sigma * max(self.sigma_vec * self.dC**0.5) > self.opts['maxstd']:
self.sigma = self.opts['maxstd'] / max(self.sigma_vec * self.dC**0.5)
# g = self.countiter
# N = self.N
# mindx = eval(self.opts['mindx'])
# if isinstance(self.opts['mindx'], basestring) else self.opts['mindx']
if self.sigma * min(self.D) < self.opts['mindx']: # TODO: sigma_vec is missing here
self.sigma = self.opts['mindx'] / min(self.D)
if self.sigma > 1e9 * self.sigma0:
alpha = self.sigma / max(self.D)
self.multiplyC(alpha)
self.sigma /= alpha**0.5
self.opts['tolupsigma'] /= alpha**0.5 # to be compared with sigma
# TODO increase sigma in case of a plateau?
# Uncertainty noise measurement is done on an upper level
# move mean into "feasible preimage", leads to weird behavior on
# 40-D tablet with bound 0.1, not quite explained (constant
# dragging is problematic, but why doesn't it settle), still a bug?
if new_injections:
self.pop_injection_directions = self.prepare_injection_directions()
self.pop_sorted = [] # remove this in case pop is still needed
self._flgtelldone = True
# end tell()
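# Illustrative usage sketch (added, not part of the original source):
# passing externally modified solutions to `tell`. With ``check_points=True``
# overly long steps are clipped back towards the sample distribution before
# the update (see `repair_genotype`).
# >>> import cma
# >>> es = cma.CMAEvolutionStrategy(4 * [0.5], 0.5, {'verbose': -9})
# >>> X = es.ask()
# >>> X[0] = 4 * [0.0] # hand-crafted candidate replacing a sampled one
# >>> es.tell(X, [cma.fcts.elli(x) for x in X], check_points=True)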
def inject(self, solutions):
"""inject a genotypic solution. The solution is used as direction
relative to the distribution mean to compute a new candidate
solution returned in method `ask_geno` which in turn is used in
method `ask`.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 2)
>>> while not es.stop():
... es.inject([4 * [0.0]])
... X = es.ask()
... break
>>> assert X[0][0] == X[0][1]
"""
if not hasattr(self, 'pop_injection_directions'):
self.pop_injection_directions = []
for solution in solutions:
if len(solution) != self.N:
raise ValueError('method `inject` needs a list or array'
+ (' each element with dimension (`len`) %d' % self.N))
self.pop_injection_directions.append(
array(solution, copy=False, dtype=float) - self.mean)
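# Note (added remark, not in the original source): injected solutions are
# stored as directions relative to the current mean; in `ask_geno` each
# direction is rescaled to a typical Mahalanobis length before being added
# to the mean, so the injected point itself is generally not returned
# verbatim by `ask`.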
def result(self):
"""return::
(xbest, f(xbest), evaluations_xbest, evaluations, iterations,
pheno(xmean), effective_stds)
"""
# TODO: how about xcurrent?
return self.best.get() + (
self.countevals, self.countiter, self.gp.pheno(self.mean),
self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)
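# Illustrative usage sketch (added, not part of the original source):
# unpacking the tuple returned by `result` after a short ask-and-tell loop.
# >>> import cma
# >>> es = cma.CMAEvolutionStrategy(3 * [1], 0.5, {'verbose': -9})
# >>> while not es.stop():
# ... X = es.ask()
# ... es.tell(X, [cma.fcts.elli(x) for x in X])
# >>> xbest, fbest, evals_best, evals, iters, xmean, stds = es.result()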
def result_pretty(self, number_of_runs=0, time_str=None,
fbestever=None):
"""pretty print result.
Returns ``self.result()``
"""
if fbestever is None:
fbestever = self.best.f
s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
% number_of_runs if number_of_runs else ''
for k, v in self.stop().items():
print('termination on %s=%s%s' % (k, str(v), s +
(' (%s)' % time_str if time_str else '')))
print('final/bestever f-value = %e %e' % (self.best.last.f,
fbestever))
if self.N < 9:
print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair))))
print('std deviation: ' + str(list(self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)))
else:
print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1]))
print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)[:8])[:-1]))
return self.result()
def clip_or_fit_solutions(self, pop, idx):
"""make sure that solutions fit to sample distribution, this interface will probably change.
In particular the frequency of long vectors appearing in pop[idx] - self.mean is limited.
"""
for k in idx:
self.repair_genotype(pop[k])
def repair_genotype(self, x, copy_if_changed=False):
"""make sure that solutions fit to the sample distribution, this interface will probably change.
In particular the frequency of x - self.mean being long is limited.
"""
x = array(x, copy=False)
mold = array(self.mean, copy=False)
if 1 < 3: # hard clip at upper_length
upper_length = self.N**0.5 + 2 * self.N / (self.N + 2) # should become an Option, but how? e.g. [0, 2, 2]
fac = self.mahalanobis_norm(x - mold) / upper_length
if fac > 1:
if copy_if_changed:
x = (x - mold) / fac + mold
else: # should be 25% faster:
x -= mold
x /= fac
x += mold
# print self.countiter, k, fac, self.mahalanobis_norm(pop[k] - mold)
# adapt also sigma: which are the trust-worthy/injected solutions?
else:
if 'checktail' not in self.__dict__: # hasattr(self, 'checktail')
raise NotImplementedError
# from check_tail_smooth import CheckTail # for the time being
# self.checktail = CheckTail()
# print('untested feature checktail is on')
fac = self.checktail.addchin(self.mahalanobis_norm(x - mold))
if fac < 1:
x = fac * (x - mold) + mold
return x
def decompose_C(self):
"""eigen-decompose self.C and update self.dC, self.C, self.B.
Known bugs: this might give a runtime error with
CMA_diagonal / separable option on.
"""
if self.opts['CMA_diagonal']:
_print_warning("this might fail with CMA_diagonal option on",
iteration=self.countiter)
print(self.opts['CMA_diagonal'])
# print(' %.19e' % self.C[0][0])
self.C = (self.C + self.C.T) / 2
self.dC = np.diag(self.C).copy()
self.D, self.B = self.opts['CMA_eigenmethod'](self.C)
# self.B = np.round(self.B, 10)
# for i in rglen(self.D):
# d = self.D[i]
# oom = np.round(np.log10(d))
# self.D[i] = 10**oom * np.round(d / 10**oom, 10)
# print(' %.19e' % self.C[0][0])
# print(' %.19e' % self.D[0])
if any(self.D <= 0):
_print_warning("ERROR", iteration=self.countiter)
raise ValueError("covariance matrix was not positive definite," +
" this must be considered as a bug")
self.D = self.D**0.5
assert all(isfinite(self.D))
idx = np.argsort(self.D)
self.D = self.D[idx]
self.B = self.B[:, idx] # self.B[i] is a row, columns self.B[:,i] are eigenvectors
self.count_eigen += 1
def updateBD(self):
"""update internal variables for sampling the distribution with the
current covariance matrix C. This method is O(N^3), if C is not diagonal.
"""
# itereigenupdated is always up-to-date in the diagonal case
# just double check here
if self.itereigenupdated == self.countiter:
return
if self.opts['CMA_diagonal'] >= self.countiter:
_print_warning("updateBD called in CMA_diagonal mode, " +
"this should be considered a bug", "updateBD",
iteration=self.countiter)
# C has already positive updates, here come the additional negative updates
if self.sp.neg.cmuexp:
C_shrunken = (1 - self.sp.cmu - self.sp.c1)**(self.countiter - self.itereigenupdated)
clip_fac = 0.60 # 0.9 is sufficient to prevent degeneration in small dimension
if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0] == 'sweep_ccov_neg':
clip_fac = 0.98
if (self.countiter - self.itereigenupdated) * self.sp.neg.cmuexp * self.N \
< clip_fac * C_shrunken:
# pos.def. guaranteed, because vectors are normalized
self.C -= self.sp.neg.cmuexp * self._Yneg
else:
max_warns = 1
try:
self._updateBD_warnings += 1
except AttributeError:
self._updateBD_warnings = 1
if self.opts['verbose'] > 1 and \
self._updateBD_warnings <= max_warns:
_print_warning('doing two additional eigen' +
'decompositions to guarantee pos.def.',
'updateBD', 'CMAEvolutionStrategy')
if self._updateBD_warnings == max_warns:
_print_warning('further warnings are suppressed',
'updateBD')
self.decompose_C()
_tmp_inverse_root_C = dot(self.B / self.D, self.B.T)
_tmp_inverse_root_C = (_tmp_inverse_root_C + _tmp_inverse_root_C.T) / 2
Zneg = dot(dot(_tmp_inverse_root_C, self._Yneg), _tmp_inverse_root_C)
eigvals, eigvecs = self.opts['CMA_eigenmethod'](Zneg)
self.count_eigen += 1
if max(eigvals) * self.sp.neg.cmuexp <= clip_fac:
self.C -= self.sp.neg.cmuexp * self._Yneg
elif 1 < 3:
self.C -= (clip_fac / max(eigvals)) * self._Yneg
_print_warning(
'clipped learning rate for negative weights, ' +
'maximal eigenvalue = %f, maxeig * ccov = %f > %f'
% (max(eigvals), max(eigvals) * self.sp.neg.cmuexp, clip_fac),
iteration=self.countiter)
if 1 < 3: # let's check
eigvals, eigvecs = self.opts['CMA_eigenmethod'](self.C)
self.count_eigen += 1
print('new min eigenval = %e, old = %e'
% (min(eigvals), min(self.D)**2))
if min(eigvals) > 0:
print('new cond = %e, old = %e'
% (max(eigvals) / min(eigvals),
(max(self.D) / min(self.D))**2))
else: # guarantees pos.def. unconditionally
_print_warning('exponential update for negative weights (internally more expensive)',
iteration=self.countiter)
self.update_exponential(self._Yneg, -self.sp.neg.cmuexp)
# self.C = self.Ypos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Yneg*Csi) * Cs
# Yneg = self.Yneg # for temporary debugging, can be removed
self._Yneg = np.zeros((self.N, self.N))
if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0].startswith('sweep_ccov'):
self.opts['CMA_const_trace'] = True
if self.opts['CMA_const_trace'] in (True, 1, 2): # normalize trace of C
if self.opts['CMA_const_trace'] == 2:
s = np.exp(2 * np.mean(np.log(self.D))) # or geom average of dC?
else:
s = np.mean(np.diag(self.C))
self.C /= s
dC = np.diag(self.C)
if max(dC) / min(dC) > 1e8:
# allows for much larger condition numbers, if axis-parallel
self.sigma_vec *= np.diag(self.C)**0.5
self.C = self.correlation_matrix()
_print_warning('condition in coordinate system exceeded 1e8' +
', rescaled to 1')
# self.C = np.triu(self.C) + np.triu(self.C,1).T # should work as well
# self.D, self.B = eigh(self.C) # hermitian, ie symmetric C is assumed
self.decompose_C()
# assert(sum(self.D-DD) < 1e-6)
# assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)
# assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)
# assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))
# qqqqqqqqqq
# is O(N^3)
# assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)
if 1 < 3 and max(self.D) / min(self.D) > 1e6 and self.gp.isidentity:
# TODO: allow to do this again
# dmean_prev = dot(self.B, (1. / self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec))
self.gp._tf_matrix = (self.sigma_vec * dot(self.B * self.D, self.B.T).T).T
self.gp._tf_matrix_inv = (dot(self.B / self.D, self.B.T).T / self.sigma_vec).T
self.gp.tf_pheno = lambda x: dot(self.gp._tf_matrix, x)
self.gp.tf_geno = lambda x: dot(self.gp._tf_matrix_inv, x) # not really necessary
self.gp.isidentity = False
assert self.mean is not self.mean_old
self.mean = self.gp.geno(self.mean) # same as tf_geno
self.mean_old = self.gp.geno(self.mean_old) # not needed?
self.pc = self.gp.geno(self.pc)
self.D[:] = 1.0
self.B = np.eye(self.N)
self.C = np.eye(self.N)
self.dC[:] = 1.0
self.sigma_vec = 1
# dmean_now = dot(self.B, (1. / self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec))
# assert Mh.vequals_approximately(dmean_now, dmean_prev)
_print_warning('\n geno-pheno transformation introduced based on current C,\n injected solutions become "invalid" in this iteration',
'updateBD', 'CMAEvolutionStrategy', self.countiter)
self.itereigenupdated = self.countiter
def multiplyC(self, alpha):
"""multiply C with a scalar and update all related internal variables (dC, D,...)"""
self.C *= alpha
if self.dC is not self.C:
self.dC *= alpha
self.D *= alpha**0.5
def update_exponential(self, Z, eta, BDpair=None):
"""exponential update of C that guarantees positive definiteness, that is,
instead of the assignment ``C = C + eta * Z``,
we have ``C = C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5``.
Parameter `Z` should have expectation zero, e.g. sum(w[i] * z[i] * z[i].T) - C
if E z z.T = C.
Parameter `eta` is the learning rate, for ``eta == 0`` nothing is updated.
This function conducts two eigendecompositions, assuming that
B and D are not up to date, unless `BDpair` is given. Given BDpair,
B is the eigensystem and D is the vector of sqrt(eigenvalues), one
eigendecomposition is omitted.
Reference: Glasmachers et al 2010, Exponential Natural Evolution Strategies
"""
if eta == 0:
return
if BDpair:
B, D = BDpair
else:
D, B = self.opts['CMA_eigenmethod'](self.C)
self.count_eigen += 1
D **= 0.5
Cs = dot(B, (B * D).T) # square root of C
Csi = dot(B, (B / D).T) # square root of inverse of C
self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)),
self.opts['CMA_eigenmethod']), Cs))
self.count_eigen += 1
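# Note (added remark, not in the original source): positive definiteness of
# the result follows because expm of a symmetric matrix has strictly
# positive eigenvalues, and C_new = Cs * expm(eta * Csi * Z * Csi) * Cs is a
# congruence transform of that positive definite matrix with the symmetric
# factor Cs = C**0.5.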
# ____________________________________________________________
# ____________________________________________________________
def feedForResume(self, X, function_values):
"""Given all "previous" candidate solutions and their respective
function values, the state of a `CMAEvolutionStrategy` object
can be reconstructed from this history. This is the purpose of
function `feedForResume`.
Arguments
---------
`X`
(all) solution points in chronological order, phenotypic
representation. The number of points must be a multiple
of popsize.
`function_values`
respective objective function values
Details
-------
`feedForResume` can be called repeatedly with only parts of
the history. The part must have the length of a multiple
of the population size.
`feedForResume` feeds the history in popsize-chunks into `tell`.
The state of the random number generator might not be
reconstructed, but this would be only relevant for the future.
Example
-------
::
import cma
# prepare
(x0, sigma0) = ... # initial values from previous trial
X = ... # list of generated solutions from a previous trial
f = ... # respective list of f-values
# resume
es = cma.CMAEvolutionStrategy(x0, sigma0)
es.feedForResume(X, f)
# continue with func as objective function
while not es.stop():
X = es.ask()
es.tell(X, [func(x) for x in X])
Credits to Dirk Bueche and Fabrice Marchal for the feeding idea.
:See: class `CMAEvolutionStrategy` for a simple dump/load to resume
"""
if self.countiter > 0:
_print_warning('feed should generally be used with a new object instance')
if len(X) != len(function_values):
raise _Error('number of solutions ' + str(len(X)) +
' and number of function values ' +
str(len(function_values)) + ' must not differ')
popsize = self.sp.popsize
if (len(X) % popsize) != 0:
raise _Error('number of solutions ' + str(len(X)) +
' must be a multiple of popsize (lambda) ' +
str(popsize))
for i in xrange(len(X) // popsize):
# feed in chunks of size popsize
self.ask() # a fake ask, mainly for a conditioned calling of updateBD
# and secondary to get possibly the same random state
self.tell(X[i * popsize:(i + 1) * popsize], function_values[i * popsize:(i + 1) * popsize])
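# Illustrative usage sketch (added, with hypothetical values, not part of
# the original source): recording a short history and feeding it into a
# fresh instance for resuming.
# >>> import cma
# >>> es_old = cma.CMAEvolutionStrategy(4 * [1], 0.7, {'verbose': -9})
# >>> X_hist, f_hist = [], []
# >>> for _ in range(3):
# ... X = es_old.ask()
# ... f = [cma.fcts.elli(x) for x in X]
# ... es_old.tell(X, f)
# ... X_hist += X
# ... f_hist += f
# >>> es = cma.CMAEvolutionStrategy(4 * [1], 0.7, {'verbose': -9})
# >>> es.feedForResume(X_hist, f_hist) # len(X_hist) is a multiple of popsize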
# ____________________________________________________________
# ____________________________________________________________
def readProperties(self):
"""reads dynamic parameters from property file (not implemented)
"""
print('not yet implemented')
# ____________________________________________________________
# ____________________________________________________________
def correlation_matrix(self):
if len(self.C.shape) <= 1:
return None
c = self.C.copy()
for i in xrange(c.shape[0]):
fac = c[i, i]**0.5
c[:, i] /= fac
c[i, :] /= fac
c = (c + c.T) / 2.0
return c
def mahalanobis_norm(self, dx):
"""compute the Mahalanobis norm that is induced by the adapted
sample distribution, covariance matrix ``C`` times ``sigma**2``,
including ``sigma_vec``. The expected Mahalanobis distance to
the sample mean is about ``sqrt(dimension)``.
Argument
--------
A *genotype* difference `dx`.
Example
-------
>>> import cma, numpy
>>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
>>> xx = numpy.random.randn(2, 10)
>>> d = es.mahalanobis_norm(es.gp.geno(xx[0]-xx[1]))
`d` is the distance "in" the true sample distribution,
sampled points have a typical distance of ``sqrt(2*es.N)``,
where ``es.N`` is the dimension, and an expected distance of
close to ``sqrt(N)`` to the sample mean. In the example,
`d` is the Euclidean distance, because C = I and sigma = 1.
"""
return sqrt(sum((self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec))**2)) / self.sigma
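# Note (added remark, not in the original source): with `dx` given in
# genotype space this computes
# sqrt(dx' (sigma**2 * diag(sigma_vec) * C * diag(sigma_vec))**-1 dx),
# i.e. the norm induced by the full sample covariance including `sigma`
# and `sigma_vec`.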
def _metric_when_multiplied_with_sig_vec(self, sig):
"""return D^-1 B^T diag(sig) B D as a measure for
C^-1/2 diag(sig) C^1/2
:param sig: a vector "used" as diagonal matrix
:return:
"""
return dot((self.B * self.D**-1.).T * sig, self.B * self.D)
def disp_annotation(self):
"""print annotation for `disp()`"""
print('Iterat #Fevals function value axis ratio sigma min&max std t[m:s]')
sys.stdout.flush()
def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default?
"""prints some single-line infos according to `disp_annotation()`,
if ``iteration_counter % modulo == 0``
"""
if modulo is None:
modulo = self.opts['verb_disp']
# console display
if modulo:
if (self.countiter - 1) % (10 * modulo) < 1:
self.disp_annotation()
if self.countiter > 0 and (self.stop() or self.countiter < 4
or self.countiter % modulo < 1):
if self.opts['verb_time']:
toc = self.elapsed_time()
stime = str(int(toc // 60)) + ':' + str(round(toc % 60, 1))
else:
stime = ''
print(' '.join((repr(self.countiter).rjust(5),
repr(self.countevals).rjust(6),
'%.15e' % (min(self.fit.fit)),
'%4.1e' % (self.D.max() / self.D.min()),
'%6.2e' % self.sigma,
'%6.0e' % (self.sigma * min(self.sigma_vec * sqrt(self.dC))),
'%6.0e' % (self.sigma * max(self.sigma_vec * sqrt(self.dC))),
stime)))
# if self.countiter < 4:
sys.stdout.flush()
return self
def plot(self):
try:
self.logger.plot()
except AttributeError:
_print_warning('plotting failed, no logger attribute found')
except:
_print_warning(('plotting failed with:', sys.exc_info()[0]),
'plot', 'CMAEvolutionStrategy')
return self
cma_default_options = {
# the follow string arguments are evaluated if they do not contain "filename"
'AdaptSigma': 'CMAAdaptSigmaCSA # or any other CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA',
'CMA_active': 'True # negative update, conducted after the original update',
# 'CMA_activefac': '1 # learning rate multiplier for active update',
'CMA_cmean': '1 # learning rate for the mean value',
'CMA_const_trace': 'False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero',
'CMA_diagonal': '0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always', # TODO 4/ccov_separable?
'CMA_eigenmethod': 'np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)',
'CMA_elitist': 'False #v or "initial" or True, elitism likely impairs global search performance',
'CMA_mirrors': 'popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
'CMA_mirrormethod': '1 # 0=unconditional, 1=selective, 2=experimental',
'CMA_mu': 'None # parents selection parameter, default is popsize // 2',
'CMA_on': 'True # False or 0 for no adaptation of the covariance matrix',
'CMA_sample_on_sphere_surface': 'False #v all mutation vectors have the same length',
'CMA_rankmu': 'True # False or 0 for omitting rank-mu update of covariance matrix',
'CMA_rankmualpha': '0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0',
'CMA_dampsvec_fac': 'np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update',
'CMA_dampsvec_fade': '0.1 # tentative fading out parameter for sigma vector update',
'CMA_teststds': 'None # factors for non-isotropic initial distr. of C, mainly for test purpose, see CMA_stds for production',
'CMA_stds': 'None # multipliers for sigma0 in each coordinate, not represented in C, makes scaling_of_variables obsolete',
# 'CMA_AII': 'False # not yet tested',
'CSA_dampfac': '1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere',
'CSA_damp_mueff_exponent': '0.5 # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
'CSA_disregard_length': 'False #v True is untested',
'CSA_clip_length_value': 'None #v untested, [0, 0] means disregarding length completely',
'CSA_squared': 'False #v use squared length for sigma-adaptation ',
'boundary_handling': 'BoundTransform # or BoundPenalty, unused when ``bounds in (None, [None, None])``',
'bounds': '[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector',
# , eval_parallel2': 'not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True} # distributes function calls to processes processes'
'fixed_variables': 'None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized',
'ftarget': '-inf #v target function value, minimization',
'is_feasible': 'is_feasible #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)',
'maxfevals': 'inf #v maximum number of function evaluations',
'maxiter': '100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations',
'mean_shift_line_samples': 'False #v sample two new solutions collinear to previous mean shift',
'mindx': '0 #v minimal std in any direction, cave interference with tol*',
'minstd': '0 #v minimal std in any coordinate direction, cave interference with tol*',
'maxstd': 'inf #v maximal std in any coordinate direction',
'pc_line_samples': 'False #v two line samples along the evolution path pc',
'popsize': '4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration',
'randn': 'np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)',
'scaling_of_variables': 'None # (rather use CMA_stds) scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is np.ones(N)',
'seed': 'None # random number seed',
'signals_filename': 'cmaes_signals.par # read from this file, e.g. "stop now"',
'termination_callback': 'None #v a function returning True for termination, called after each iteration step and could be abused for side effects',
'tolfacupx': '1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
'tolupsigma': '1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eigenvals(C))) indicates "creeping behavior" with usually minor improvements',
'tolfun': '1e-11 #v termination criterion: tolerance in function value, quite useful',
'tolfunhist': '1e-12 #v termination criterion: tolerance in function value history',
'tolstagnation': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
'tolx': '1e-11 #v termination criterion: tolerance in x-changes',
'transformation': 'None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno',
'typical_x': 'None # used with scaling_of_variables',
'updatecovwait': 'None #v number of iterations without distribution update, name is subject to future changes', # TODO: rename: iterwaitupdatedistribution?
'verbose': '1 #v verbosity, e.g. of initial/final message, -1 is very quiet, -9 maximally quiet, not yet fully implemented',
'verb_append': '0 # initial evaluation counter, if append, do not overwrite output files',
'verb_disp': '100 #v verbosity: display console output every verb_disp iteration',
'verb_filenameprefix': 'outcmaes # output filenames prefix',
'verb_log': '1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions',
'verb_plot': '0 #v in fmin(): plot() is called every verb_plot iteration',
'verb_time': 'True #v output timings on console',
'vv': '0 #? versatile variable for hacking purposes, value found in self.opts["vv"]'
}
class CMAOptions(dict):
"""``CMAOptions()`` returns a dictionary with the available options
and their default values for class ``CMAEvolutionStrategy``.
``CMAOptions('pop')`` returns a subset of recognized options that
contain 'pop' in their keyword name or (default) value or description.
``CMAOptions(opts)`` returns the subset of recognized options in
``dict(opts)``.
Option values can be "written" in a string and, when passed to fmin
or CMAEvolutionStrategy, are evaluated using "N" and "popsize" as
known values for dimension and population size (sample size, number
of new solutions per iteration). All default option values are such
a string.
Details
-------
``CMAOptions`` entries starting with ``tol`` are termination
"tolerances".
For `tolstagnation`, the medians over the first and the second half
of at least `tolstagnation` iterations are compared, for both the
per-iteration best and the per-iteration median function value.
Example
-------
::
import cma
cma.CMAOptions('tol')
is a shortcut for cma.CMAOptions().match('tol') that returns all options
that contain 'tol' in their name or description.
To set an option
import cma
opts = cma.CMAOptions()
opts.set('tolfun', 1e-12)
opts['tolx'] = 1e-11
:See: `fmin`(), `CMAEvolutionStrategy`, `_CMAParameters`
"""
# @classmethod # self is the class, not the instance
# @property
# def default(self):
# """returns all options with defaults"""
# return fmin([],[])
@staticmethod
def defaults():
"""return a dictionary with default option values and description"""
return dict((str(k), str(v)) for k, v in cma_default_options.items())
# getting rid of the u of u"name" by str(u"name")
# return dict(cma_default_options)
@staticmethod
def versatile_options():
"""return list of options that can be changed at any time (not
only be initialized), however the list might not be entirely up
to date.
The string ' #v ' in the default value indicates a 'versatile'
option that can be changed any time.
"""
return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
def check(self, options=None):
"""check for ambiguous keys and move attributes into dict"""
self.check_values(options)
self.check_attributes(options)
self.check_values(options)
return self
def check_values(self, options=None):
corrected_key = CMAOptions().corrected_key # caveat: infinite recursion
validated_keys = []
original_keys = []
if options is None:
options = self
for key in options:
correct_key = corrected_key(key)
if correct_key is None:
raise ValueError("""%s is not a valid option""" % key)
if correct_key in validated_keys:
if key == correct_key:
key = original_keys[validated_keys.index(key)]
raise ValueError("%s was not a unique key for %s option"
% (key, correct_key))
validated_keys.append(correct_key)
original_keys.append(key)
return options
def check_attributes(self, opts=None):
"""check for attributes and moves them into the dictionary"""
if opts is None:
opts = self
if 1 < 3:
# the problem with merge is that ``opts['ftarget'] = new_value``
# would be overwritten by the old ``opts.ftarget``.
# The solution here is to empty opts.__dict__ after the merge
if hasattr(opts, '__dict__'):
for key in list(opts.__dict__):
if key in self._attributes:
continue
_print_warning(
"""
An option attribute has been merged into the dictionary,
thereby possibly overwriting the dictionary value, and the
attribute has been removed. Assign options with
``opts['%s'] = value`` # dictionary assignment
or use
``opts.set('%s', value) # here isinstance(opts, CMAOptions)
instead of
``opts.%s = value`` # attribute assignment
""" % (key, key, key), 'check', 'CMAOptions')
opts[key] = opts.__dict__[key] # getattr(opts, key)
delattr(opts, key) # is that kosher?
# delattr is necessary to prevent that the attribute
# overwrites the dict entry later again
return opts
@staticmethod
def merge(self, dict_=None):
"""not is use so far, see check()"""
if dict_ is None and hasattr(self, '__dict__'):
dict_ = self.__dict__
# doesn't work anymore as we have _lock attribute
if dict_ is None:
return self
self.update(dict_)
return self
def __init__(self, s=None, unchecked=False):
"""return an `CMAOptions` instance, either with the default
options, if ``s is None``, or with all options whose name or
description contains `s`, if `s` is a string (case is
disregarded), or with entries from dictionary `s` as options,
not complemented with default options or settings
Returns: see above.
"""
# if not CMAOptions.defaults: # this is different from self.defaults!!!
# CMAOptions.defaults = fmin([],[])
if s is None:
super(CMAOptions, self).__init__(CMAOptions.defaults()) # dict.__init__(self, CMAOptions.defaults()) should be the same
# self = CMAOptions.defaults()
elif isinstance(s, basestring):
super(CMAOptions, self).__init__(CMAOptions().match(s))
# we could return here
else:
super(CMAOptions, self).__init__(s)
if not unchecked and s is not None:
self.check() # caveat: infinite recursion
for key in list(self.keys()):
correct_key = self.corrected_key(key)
if correct_key not in CMAOptions.defaults():
_print_warning('invalid key ``' + str(key) +
'`` removed', '__init__', 'CMAOptions')
self.pop(key)
elif key != correct_key:
self[correct_key] = self.pop(key)
# self.evaluated = False # would become an option entry
self._lock_setting = False
self._attributes = self.__dict__.copy() # are not valid keys
self._attributes['_attributes'] = len(self._attributes)
def init(self, dict_or_str, val=None, warn=True):
"""initialize one or several options.
Arguments
---------
`dict_or_str`
a dictionary if ``val is None``, otherwise a key.
If `val` is provided `dict_or_str` must be a valid key.
`val`
value for key
Details
-------
Only known keys are accepted. Known keys are in `CMAOptions.defaults()`
"""
# dic = dict_or_key if val is None else {dict_or_key:val}
self.check(dict_or_str)
dic = dict_or_str
if val is not None:
dic = {dict_or_str:val}
for key, val in dic.items():
key = self.corrected_key(key)
if key not in CMAOptions.defaults():
# TODO: find a better solution?
if warn:
print('Warning in cma.CMAOptions.init(): key ' +
str(key) + ' ignored')
else:
self[key] = val
return self
def set(self, dic, val=None, force=False):
"""set can assign versatile options from
`CMAOptions.versatile_options()` with a new value, use `init()`
for the others.
Arguments
---------
`dic`
either a dictionary or a key. In the latter
case, `val` must be provided
`val`
value for `key`, approximate match is sufficient
`force`
force setting of non-versatile options, use with caution
This method will be most probably used with the ``opts`` attribute of
a `CMAEvolutionStrategy` instance.
"""
if val is not None: # dic is a key in this case
dic = {dic:val} # compose a dictionary
for key_original, val in list(dict(dic).items()):
key = self.corrected_key(key_original)
if not self._lock_setting or \
key in CMAOptions.versatile_options():
self[key] = val
else:
_print_warning('key ' + str(key_original) +
' ignored (not recognized as versatile)',
'set', 'CMAOptions')
return self # to allow o = CMAOptions(o).set(new)
def complement(self):
"""add all missing options with their default values"""
# add meta-parameters, given options have priority
self.check()
for key in CMAOptions.defaults():
if key not in self:
self[key] = CMAOptions.defaults()[key]
return self
def settable(self):
"""return the subset of those options that are settable at any
time.
Settable options are in `versatile_options()`, but the
list might be incomplete.
"""
return CMAOptions([i for i in list(self.items())
if i[0] in CMAOptions.versatile_options()])
def __call__(self, key, default=None, loc=None):
"""evaluate and return the value of option `key` on the fly, or
returns those options whose name or description contains `key`,
case disregarded.
Details
-------
Keys that contain `filename` are not evaluated.
For ``loc==None``, `self` is used as environment
but this does not define ``N``.
:See: `eval()`, `evalall()`
"""
try:
val = self[key]
except:
return self.match(key)
if loc is None:
loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing
try:
if isinstance(val, basestring):
val = val.split('#')[0].strip() # remove comments
if isinstance(val, basestring) and \
key.find('filename') < 0:
# and key.find('mindx') < 0:
val = eval(val, globals(), loc)
# invoke default
# TODO: val in ... fails with array type, because it is applied element wise!
# elif val in (None,(),[],{}) and default is not None:
elif val is None and default is not None:
val = eval(str(default), globals(), loc)
except:
pass # slightly optimistic: the previous is bug-free
return val
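# Illustrative sketch (the concrete outcome is an assumption): string-valued
# options are evaluated on the fly once the needed names are in ``loc``, e.g.
#   opts = CMAOptions()
#   opts('popsize', loc={'N': 10})   # evaluates the default string expression
# which should yield an integer once ``N`` is available in ``loc``.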
def corrected_key(self, key):
"""return the matching valid key, if ``key.lower()`` is a unique
starting sequence to identify the valid key, ``else None``
"""
matching_keys = []
for allowed_key in CMAOptions.defaults():
if allowed_key.lower() == key.lower():
return allowed_key
if allowed_key.lower().startswith(key.lower()):
matching_keys.append(allowed_key)
return matching_keys[0] if len(matching_keys) == 1 else None
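# Example sketch (the abbreviation below is an illustrative assumption): an
# abbreviated, case-insensitive key resolves to the unique matching default
# key, e.g. something like
#   CMAOptions().corrected_key('maxfev')   # -> 'maxfevals', if unique
# and ``None`` is returned when the abbreviation is ambiguous or unknown.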
def eval(self, key, default=None, loc=None, correct_key=True):
"""Evaluates and sets the specified option value in
environment `loc`. Many options need ``N`` to be defined in
`loc`, some need `popsize`.
Details
-------
Keys that contain 'filename' are not evaluated.
For `loc` is None, the self-dict is used as environment
:See: `evalall()`, `__call__`
"""
# TODO: try: loc['dim'] = loc['N'] etc
if correct_key:
# in_key = key # for debugging only
key = self.corrected_key(key)
self[key] = self(key, default, loc)
return self[key]
def evalall(self, loc=None, defaults=None):
"""Evaluates all option values in environment `loc`.
:See: `eval()`
"""
self.check()
if defaults is None:
defaults = cma_default_options
# TODO: this needs rather the parameter N instead of loc
if 'N' in loc: # TODO: __init__ of CMA can be simplified
popsize = self('popsize', defaults['popsize'], loc)
for k in list(self.keys()):
k = self.corrected_key(k)
self.eval(k, defaults[k],
{'N':loc['N'], 'popsize':popsize})
self._lock_setting = True
return self
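# Usage sketch (illustrative): evaluate all option expressions for a given
# dimension, after which e.g. ``opts['popsize']`` holds a number rather than
# the default string expression:
#   opts = CMAOptions().evalall({'N': 10})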
def match(self, s=''):
"""return all options that match, in the name or the description,
with string `s`, case is disregarded.
Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
options.
"""
match = s.lower()
res = {}
for k in sorted(self):
s = str(k) + '=\'' + str(self[k]) + '\''
if match in s.lower():
res[k] = self[k]
return CMAOptions(res, unchecked=True)
def pp(self):
pprint(self)
def pprint(self, linebreak=80):
for i in sorted(self.items()):
s = str(i[0]) + "='" + str(i[1]) + "'"
a = s.split(' ')
# print s in chunks
l = '' # start entire to the left
while a:
while a and len(l) + len(a[0]) < linebreak:
l += ' ' + a.pop(0)
print(l)
l = ' ' # tab for subsequent lines
print_ = pprint # Python style to prevent clash with keywords
printme = pprint
# ____________________________________________________________
# ____________________________________________________________
class _CMAStopDict(dict):
"""keep and update a termination condition dictionary, which is
"usually" empty and returned by `CMAEvolutionStrategy.stop()`.
The class methods entirely depend on `CMAEvolutionStrategy` class
attributes.
Details
-------
This class is not relevant for the end-user and could be a nested
class, but nested classes cannot be serialized.
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'verbose':-1})
>>> print(es.stop())
{}
>>> es.optimize(cma.fcts.sphere, verb_disp=0)
>>> print(es.stop())
{'tolfun': 1e-11}
:See: `OOOptimizer.stop()`, `CMAEvolutionStrategy.stop()`
"""
def __init__(self, d={}):
update = isinstance(d, CMAEvolutionStrategy)
super(_CMAStopDict, self).__init__({} if update else d)
self._stoplist = [] # to keep multiple entries
self.lastiter = 0 # probably not necessary
if isinstance(d, _CMAStopDict): # inherit
self._stoplist = d._stoplist # multiple entries
self.lastiter = d.lastiter # probably not necessary
if update:
self._update(d)
def __call__(self, es=None, check=True):
"""update and return the termination conditions dictionary
"""
if not check:
return self
if es is None and self.es is None:
raise ValueError('termination conditions need an optimizer to act upon')
self._update(es)
return self
def _update(self, es):
"""Test termination criteria and update dictionary
"""
if es is None:
es = self.es
assert es is not None
if es.countiter == 0: # in this case termination tests fail
self.__init__()
return self
self.lastiter = es.countiter
self.es = es
self.clear() # compute conditions from scratch
N = es.N
opts = es.opts
self.opts = opts # a hack to get _addstop going
# fitness: generic criterion, user defined w/o default
self._addstop('ftarget',
es.best.f < opts['ftarget'])
# maxiter, maxfevals: generic criteria
self._addstop('maxfevals',
es.countevals - 1 >= opts['maxfevals'])
self._addstop('maxiter',
## meta_parameters.maxiter_multiplier == 1.0
es.countiter >= 1.0 * opts['maxiter'])
# tolx, tolfacupx: generic criteria
# tolfun, tolfunhist (CEC:tolfun includes hist)
self._addstop('tolx',
all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * es.pc]) and
all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * sqrt(es.dC)]))
self._addstop('tolfacupx',
any(es.sigma * es.sigma_vec * sqrt(es.dC) >
es.sigma0 * es.sigma_vec0 * opts['tolfacupx']))
self._addstop('tolfun',
es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and
max(es.fit.hist) - min(es.fit.hist) < opts['tolfun'])
self._addstop('tolfunhist',
len(es.fit.hist) > 9 and
max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist'])
# worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5
# but the median is not so much getting worse
# / 5 reflects the sparsity of histbest/median
# / 2 reflects the left and right part to be compared
## meta_parameters.tolstagnation_multiplier == 1.0
l = int(max(( 1.0 * opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10)))
# TODO: why max(..., len(histbest)/10) ???
# TODO: the problem in the beginning is only with best ==> ???
# equality should handle flat fitness
self._addstop('tolstagnation', # leads sometimes early stop on ftablet, fcigtab, N>=50?
1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and
len(es.fit.histbest) > 100 and 2 * l < len(es.fit.histbest) and
np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2 * l]) and
np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2 * l]))
# iiinteger: stagnation termination can prevent to find the optimum
self._addstop('tolupsigma', opts['tolupsigma'] and
es.sigma / np.max(es.D) > es.sigma0 * opts['tolupsigma'])
if 1 < 3:
# non-user defined, method specific
# noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov
idx = np.where(es.mean == es.mean + 0.2 * es.sigma *
es.sigma_vec * es.dC**0.5)[0]
self._addstop('noeffectcoord', any(idx), idx)
# any([es.mean[i] == es.mean[i] + 0.2 * es.sigma *
# (es.sigma_vec if isscalar(es.sigma_vec) else es.sigma_vec[i]) *
# sqrt(es.dC[i])
# for i in xrange(N)])
# )
if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']:
i = es.countiter % N
self._addstop('noeffectaxis',
sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N)
self._addstop('conditioncov',
es.D[-1] > 1e7 * es.D[0], 1e14) # TODO
self._addstop('callback', es.callbackstop) # termination_callback
try:
with open(self.opts['signals_filename'], 'r') as f:
for line in f.readlines():
words = line.split()
if len(words) < 2 or words[0].startswith(('#', '%')):
continue
if words[0] == 'stop' and words[1] == 'now':
if len(words) > 2 and not words[2].startswith(
self.opts['verb_filenameprefix']):
continue
self._addstop('file_signal', True, "stop now")
break
except IOError:
pass
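# Illustrative note (the parsing above is the authoritative reference): a
# line "stop now" in the file named by option 'signals_filename' triggers
# the 'file_signal' termination; an optional third word restricts the signal
# to runs whose 'verb_filenameprefix' is a prefix of that word.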
if len(self):
self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate',
len(es.fit.hist) > 9 and
max(es.fit.hist) == min(es.fit.hist))
return self
def _addstop(self, key, cond, val=None):
if cond:
self.stoplist.append(key) # can have the same key twice
self[key] = val if val is not None \
else self.opts.get(key, None)
def clear(self):
for k in list(self):
self.pop(k)
self.stoplist = []
# ____________________________________________________________
# ____________________________________________________________
class _CMAParameters(object):
"""strategy parameters like population size and learning rates.
Note:
contrary to `CMAOptions`, `_CMAParameters` is not (yet) part of the
"user-interface" and subject to future changes (it might become
a `collections.namedtuple`)
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1)
(6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190) # the seed is "random" by default
>>>
>>> type(es.sp) # sp contains the strategy parameters
<class 'cma._CMAParameters'>
>>>
>>> es.sp.disp()
{'CMA_on': True,
'N': 20,
'c1': 0.004181139918745593,
'c1_sep': 0.034327992810300939,
'cc': 0.17176721127681213,
'cc_sep': 0.25259494835857677,
'cmean': 1.0,
'cmu': 0.0085149624979034746,
'cmu_sep': 0.057796356229390715,
'cs': 0.21434997799189287,
'damps': 1.2143499779918929,
'mu': 6,
'mu_f': 6.0,
'mueff': 3.7294589343030671,
'popsize': 12,
'rankmualpha': 0.3,
'weights': array([ 0.40240294, 0.25338908, 0.16622156, 0.10437523, 0.05640348,
0.01720771])}
>>>
>> es.sp == cma._CMAParameters(20, 12, cma.CMAOptions().evalall({'N': 20}))
True
:See: `CMAOptions`, `CMAEvolutionStrategy`
"""
def __init__(self, N, opts, ccovfac=1, verbose=True):
"""Compute strategy parameters, mainly depending on
dimension and population size, by calling `set`
"""
self.N = N
if ccovfac == 1:
ccovfac = opts['CMA_on'] # that's a hack
self.popsize = None # declaring the attribute, not necessary though
self.set(opts, ccovfac=ccovfac, verbose=verbose)
def set(self, opts, popsize=None, ccovfac=1, verbose=True):
"""Compute strategy parameters as a function
of dimension and population size """
alpha_cc = 1.0 # cc-correction for mueff, was zero before
def conedf(df, mu, N):
"""used for computing separable learning rate"""
return 1. / (df + 2.*sqrt(df) + float(mu) / N)
def cmudf(df, mu, alphamu):
"""used for computing separable learning rate"""
return (alphamu + mu - 2. + 1. / mu) / (df + 4.*sqrt(df) + mu / 2.)
sp = self
N = sp.N
if popsize:
opts.evalall({'N':N, 'popsize':popsize})
else:
popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in CMAOptions()
## meta_parameters.lambda_exponent == 0.0
popsize = int(popsize + N** 0.0 - 1)
sp.popsize = popsize
if opts['CMA_mirrors'] < 0.5:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
elif opts['CMA_mirrors'] > 1:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
else:
sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... * popsize is optimal
# lam = arange(2,22)
# mirr = 0.16 + 0.29/lam
# print(lam); print([int(0.5 + l) for l in mirr*lam])
# [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
# [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
## meta_parameters.parent_fraction == 0.5
sp.mu_f = 0.5 * sp.popsize # float value of mu
if opts['CMA_mu'] is not None:
sp.mu_f = opts['CMA_mu']
sp.mu = int(sp.mu_f + 0.499999) # round down for x.5
sp.mu = max((sp.mu, 1))
# in principle we have mu_opt = popsize/2 + lam_mirr/2,
# which means in particular weights should only be negative for q > 0.5+mirr_frac/2
if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
_print_warning("pairwise selection is not implemented, therefore " +
" mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
if sp.lam_mirr > sp.popsize // 2:
raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
"theoretically optimal is 0.159")
sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
sp.weights /= sum(sp.weights)
sp.mueff = 1 / sum(sp.weights**2)
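# Worked example (approximate, for illustration only): with popsize=6 and
# mu=3 the raw weights are log(3.5) - log([1, 2, 3]) ~ [1.25, 0.56, 0.15],
# i.e. ~ [0.64, 0.28, 0.08] after normalization, giving mueff ~ 2.0.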
# TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
## meta_parameters.cs_exponent == 1.0
b = 1.0
## meta_parameters.cs_multiplier == 1.0
sp.cs = 1.0 * (sp.mueff + 2)**b / (N + (sp.mueff + 3)**b) # TODO: this doesn't change dependency of dimension
# sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
## meta_parameters.cc_exponent == 1.0
b = 1.0
## meta_parameters.cc_multiplier == 1.0
sp.cc = 1.0 * \
(4 + alpha_cc * sp.mueff / N)**b / \
(N**b + (4 + alpha_cc * 2 * sp.mueff / N)**b)
sp.cc_sep = (1 + 1 / N + alpha_cc * sp.mueff / N) / (N**0.5 + 1 / N + alpha_cc * 2 * sp.mueff / N) # \not\gg\cc
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
## meta_parameters.cc_multiplier == 1.0
sp.cc = 1.0 * (4 + sp.mueff / N)**0.5 / ((N + 4)**0.5 + (2 * sp.mueff / N)**0.5)
sp.rankmualpha = opts['CMA_rankmualpha']
# sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
## meta_parameters.c1_multiplier == 1.0
sp.c1 = ( 1.0 * ccovfac * min(1, sp.popsize / 6) *
## meta_parameters.c1_exponent == 2.0
2 / ((N + 1.3)** 2.0 + sp.mueff))
# 1/0
sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
if opts['CMA_rankmu'] != 0: # also empty
## meta_parameters.cmu_multiplier == 2.0
alphacov, mu = 2.0 , sp.mueff
sp.cmu = min(1 - sp.c1, ccovfac * alphacov *
## meta_parameters.cmu_exponent == 2.0
(sp.rankmualpha + mu - 2 + 1 / mu) / ((N + 2)** 2.0 + alphacov * mu / 2))
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov':
sp.cmu = opts['vv'][1]
sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
else:
sp.cmu = sp.cmu_sep = 0
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
sp.c1 = opts['vv'][1]
sp.neg = _BlancClass()
if opts['CMA_active'] and opts['CMA_on']:
# in principle we have mu_opt = popsize/2 + lam_mirr/2,
# which means in particular weights should only be negative for q > 0.5+mirr_frac/2
if 1 < 3: # seems most natural: continuation of log(lambda/2) - log(k) qqqqqqqqqqqqqqqqqqqqqqqqqq
sp.neg.mu_f = popsize // 2 # not sure anymore what this is good for
sp.neg.weights = array([log(k) - log(popsize/2 + 1/2) for k in np.arange(np.ceil(popsize/2 + 1.1/2), popsize + .1)])
sp.neg.mu = len(sp.neg.weights)
sp.neg.weights /= sum(sp.neg.weights)
sp.neg.mueff = 1 / sum(sp.neg.weights**2)
## meta_parameters.cact_exponent == 1.5
sp.neg.cmuexp = opts['CMA_active'] * 0.3 * sp.neg.mueff / ((N + 2)** 1.5 + 1.0 * sp.neg.mueff)
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov_neg':
sp.neg.cmuexp = opts['vv'][1]
# reasoning on learning rate cmuexp: with sum |w| == 1 and
# length-normalized vectors in the update, the residual
# variance in any direction exceeds exp(-N*cmuexp)
assert sp.neg.mu >= sp.lam_mirr # not really necessary
# sp.neg.minresidualvariance = 0.66 # not in use, keep at least 0.66 in all directions, small popsize is most critical
else:
sp.neg.cmuexp = 0
sp.CMA_on = sp.c1 + sp.cmu > 0
# print(sp.c1_sep / sp.cc_sep)
if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''):
sp.CMA_on = False
# sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
mueff_exponent = 0.5
if 1 < 3:
mueff_exponent = opts['CSA_damp_mueff_exponent']
# TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
sp.damps = opts['CSA_dampfac'] * (0.5 +
0.5 * min([1, (sp.lam_mirr / (0.159 * sp.popsize) - 1)**2])**1 +
2 * max([0, ((sp.mueff - 1) / (N + 1))**mueff_exponent - 1]) + sp.cs
)
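# Sanity check (a derivation, not additional code): without mirrored sampling
# (lam_mirr == 0) and with CSA_dampfac == 1 and mueff_exponent == 0.5, the
# expression above reduces to the familiar
#   damps = 1 + 2 * max(0, sqrt((mueff - 1) / (N + 1)) - 1) + cs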
sp.cmean = float(opts['CMA_cmean'])
# sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
# in larger dim it does, 15-D with defaults, kappa=8 factor 2
if verbose:
if not sp.CMA_on:
print('covariance matrix adaptation turned off')
if opts['CMA_mu'] != None:
print('mu = %f' % (sp.mu_f))
# return self # the constructor returns itself
def disp(self):
pprint(self.__dict__)
def fmin(objective_function, x0, sigma0,
options=None,
args=(),
gradf=None,
restarts=0,
restart_from_best='False',
incpopsize=2,
eval_initial_x=False,
noise_handler=None,
noise_change_sigma_exponent=1,
noise_kappa_exponent=0, # TODO: add max kappa value as parameter
bipop=False):
"""functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin(objective_function, x0, sigma0)``
minimizes `objective_function` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
minimizes `objective_function` up to target function value 1e-5, which
is typically useful for benchmarking.
``fmin(objective_function, x0, sigma0, args=('f',))``
minimizes `objective_function` called with an additional argument ``'f'``.
``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
uses additional options ``ftarget`` and ``popsize``
``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`objective_function`, similar to `esobj.optimize()`.
Arguments
=========
`objective_function`
function to be minimized. Called as ``objective_function(x,
*args)``. `x` is a one-dimensional `numpy.ndarray`.
`objective_function` can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
before the application of the geno-phenotype transformation
according to the ``transformation`` option. It can also be
a string holding a Python expression that is evaluated
to yield the initial guess - this is important in case
restarts are performed so that they start from different
places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy`
object instance, in that case `sigma0` can be ``None``.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4th of the search domain width
(where the optimum is to be expected). The variables in
`objective_function` should be scaled such that they
presumably have similar sensitivity.
See also option `scaling_of_variables`.
`options`
a dictionary with additional options passed to the constructor
of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()``
for a list of available options.
``args=()``
arguments to be used to call the `objective_function`
``gradf``
gradient of f, where ``len(gradf(x, *args)) == len(x)``.
`gradf` is called once in each iteration if
``gradf is not None``.
``restarts=0``
number of restarts with increasing population size, see also
parameter `incpopsize`, implementing the IPOP-CMA-ES restart
strategy, see also parameter `bipop`; to restart from
different points (recommended), pass `x0` as a string.
``restart_from_best=False``
which point to restart from
``incpopsize=2``
multiplier for increasing the population size `popsize` before
each restart
``eval_initial_x=None``
evaluate initial solution, for `None` only with elitist option
``noise_handler=None``
a ``NoiseHandler`` instance or ``None``, a simple usecase is
``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))``
see ``help(cma.NoiseHandler)``.
``noise_change_sigma_exponent=1``
exponent for sigma increment for additional noise treatment
``noise_kappa_exponent=0``
instead of applying reevaluations, the "number of evaluations"
is (ab)used as scaling factor kappa (experimental).
``bipop``
if True, run as BIPOP-CMA-ES; BIPOP is a special restart
strategy switching between two population sizings - small
(like the default CMA, but with more focused search) and
large (progressively increased as in IPOP). This makes the
algorithm perform well both on functions with many regularly
or irregularly arranged local optima (the latter by frequently
restarting with small populations). For the `bipop` parameter
to actually take effect, also select non-zero number of
(IPOP) restarts; the recommended setting is ``restarts<=9``
and `x0` passed as a string. Note that small-population
restarts do not count into the total restart count.
Optional Arguments
==================
All values in the `options` dictionary are evaluated if they are of
type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for
details. The full list is available via ``cma.CMAOptions()``.
>>> import cma
>>> cma.CMAOptions()
Subsets of options can be displayed, for example like
``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``,
see also class `CMAOptions`.
Return
======
Return the list provided by `CMAEvolutionStrategy.result()` appended
with termination conditions, an `OOOptimizer` and a `BaseDataLogger`::
res = es.result() + (es.stop(), es, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
latter class should be used when full control over the iteration loop
of the optimizer is desired.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01
2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01
3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01
100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02
200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02
300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02
400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02
500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03
600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06
650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution: [ 1. 1.00000001 1. 1.
1. 1.00000001 1.00000002 1.00000003 ...]
std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09
5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The above call is pretty much equivalent with the slightly more
verbose call::
es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
options=options).optimize(cma.fcts.rosen)
The following example calls `fmin` optimizing the Rastrigin function
in 3-D with random initial solution in [-2,2], initial step-size 0.5
and the BIPOP restart strategy (that progressively increases population).
The options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8}
>>>
>>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5,
... options, restarts=9, bipop=True)
(3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.633489455763566e+01 1.0e+00 4.35e-01 4e-01 4e-01
2 14 9.762462950258016e+00 1.2e+00 4.12e-01 4e-01 4e-01
3 21 2.461107851413725e+01 1.4e+00 3.78e-01 3e-01 4e-01
100 700 9.949590571272680e-01 1.7e+00 5.07e-05 3e-07 5e-07
123 861 9.949590570932969e-01 1.3e+00 3.93e-06 9e-09 1e-08
termination on tolfun=1e-11
final/bestever f-value = 9.949591e-01 9.949591e-01
mean solution: [ 9.94958638e-01 -7.19265205e-10 2.09294450e-10]
std deviation: [ 8.71497860e-09 8.58994807e-09 9.85585654e-09]
[...]
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 5342.0 2.114883315350800e+01 1.0e+00 3.42e-02 3e-02 4e-02
2 5351.0 1.810102940125502e+01 1.4e+00 3.79e-02 3e-02 4e-02
3 5360.0 1.340222457448063e+01 1.4e+00 4.58e-02 4e-02 6e-02
50 5783.0 8.631491965616078e-09 1.6e+00 2.01e-04 8e-06 1e-05
termination on ftarget=1e-08 after 4 restarts
final/bestever f-value = 8.316963e-09 8.316963e-09
mean solution: [ -3.10652459e-06 2.77935436e-06 -4.95444519e-06]
std deviation: [ 1.02825265e-05 8.08348144e-06 8.47256408e-06]
In either case, the method::
cma.plot();
(based on `matplotlib.pyplot`) produces a plot of the run and, if
necessary::
cma.show()
shows the plot in a window. Finally::
cma.savefig('myfirstrun') # savefig from matplotlib.pyplot
will save the figure in a png.
We can use the gradient like
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1,
... options = {'ftarget':1e-8,},
... gradf=cma.fcts.grad_rosen,
... )
>>> assert cma.fcts.rosen(res[0]) < 1e-8
>>> assert res[2] < 3600 # 1% are > 3300
>>> assert res[3] < 3600 # 1% are > 3300
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize()`, `plot()`,
`CMAOptions`, `scipy.optimize.fmin()`
""" # style guides say there should be the above empty line
if 1 < 3: # try: # pass on KeyboardInterrupt
if not objective_function: # cma.fmin(0, 0, 0)
return CMAOptions() # these opts are by definition valid
fmin_options = locals().copy() # archive original options
del fmin_options['objective_function']
del fmin_options['x0']
del fmin_options['sigma0']
del fmin_options['options']
del fmin_options['args']
if options is None:
options = cma_default_options
CMAOptions().check_attributes(options) # might modify options
# checked that no options.ftarget =
opts = CMAOptions(options.copy()).complement()
# BIPOP-related variables:
runs_with_small = 0
small_i = []
large_i = []
popsize0 = None # to be evaluated after the first iteration
maxiter0 = None # to be evaluated after the first iteration
base_evals = 0
irun = 0
best = BestSolution()
while True: # restart loop
sigma_factor = 1
# Adjust the population according to BIPOP after a restart.
if not bipop:
# BIPOP not in use, simply double the previous population
# on restart.
if irun > 0:
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
elif irun == 0:
# Initial run is with "normal" population size; it is
# the large population before first doubling, but its
# budget accounting is the same as in case of small
# population.
poptype = 'small'
elif sum(small_i) < sum(large_i):
# An interweaved run with small population size
poptype = 'small'
runs_with_small += 1 # _Before_ it's used in popsize_lastlarge
sigma_factor = 0.01 ** np.random.uniform() # Local search
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = np.floor(popsize0 * popsize_multiplier ** (np.random.uniform() ** 2))
opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize'])
# print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
else:
# A run with large population size; the population
# doubling is implicit with incpopsize.
poptype = 'large'
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
opts['maxiter'] = maxiter0
# print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
# recover from a CMA object
if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
es = x0
x0 = es.inputargs['x0'] # for the next restarts
if isscalar(sigma0) and isfinite(sigma0) and sigma0 > 0:
es.sigma = sigma0
# debatable whether this makes sense:
sigma0 = es.inputargs['sigma0'] # for the next restarts
if options is not None:
es.opts.set(options)
# ignore further input args and keep original options
else: # default case
if irun and eval(str(fmin_options['restart_from_best'])):
_print_warning('CAVE: restart_from_best is often not useful',
verbose=opts['verbose'])
es = CMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts)
else:
es = CMAEvolutionStrategy(x0, sigma_factor * sigma0, opts)
if eval_initial_x or es.opts['CMA_elitist'] == 'initial' \
or (es.opts['CMA_elitist'] and eval_initial_x is None):
x = es.gp.pheno(es.mean,
into_bounds=es.boundary_handler.repair,
archive=es.sent_solutions)
es.best.update([x], es.sent_solutions,
[objective_function(x, *args)], 1)
es.countevals += 1
opts = es.opts # processed options, unambiguous
# a hack:
fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True)
for k in fmin_opts:
# locals() cannot be modified directly, exec won't work
# in 3.x, therefore
fmin_opts.eval(k, loc={'N': es.N,
'popsize': opts['popsize']},
correct_key=False)
append = opts['verb_append'] or es.countiter > 0 or irun > 0
# es.logger is "the same" logger, because the "identity"
# is only determined by the `filenameprefix`
logger = CMADataLogger(opts['verb_filenameprefix'],
opts['verb_log'])
logger.register(es, append).add() # no fitness values here
es.logger = logger
if noise_handler:
noisehandler = noise_handler
noise_handling = True
if fmin_opts['noise_change_sigma_exponent'] > 0:
es.opts['tolfacupx'] = inf
else:
noisehandler = NoiseHandler(es.N, 0)
noise_handling = False
es.noise_handler = noisehandler
# the problem: this assumes that good solutions cannot take longer than bad ones:
# with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
if 1 < 3:
while not es.stop(): # iteration loop
# X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
X, fit = es.ask_and_eval(objective_function, args, gradf=gradf,
evaluations=noisehandler.evaluations,
aggregation=np.median) # treats NaN with resampling
# TODO: check args and in case use args=(noisehandler.evaluations, )
es.tell(X, fit) # prepare for next iteration
if noise_handling: # it would be better to also use these f-evaluations in tell
es.sigma *= noisehandler(X, fit, objective_function, es.ask,
args=args)**fmin_opts['noise_change_sigma_exponent']
es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though
# es.more_to_write.append(noisehandler.evaluations_just_done)
if noisehandler.maxevals > noisehandler.minevals:
es.more_to_write.append(noisehandler.get_evaluations())
if 1 < 3:
es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
if es.sp.cmean > 1:
es.sp.cmean = 1
es.disp()
logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
modulo=1 if es.stop() and logger.modulo else None)
if (opts['verb_log'] and opts['verb_plot'] and
(es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
logger.plot(324)
# end while not es.stop
mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
fmean = objective_function(mean_pheno, *args)
es.countevals += 1
es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)
best.update(es.best, es.sent_solutions) # in restarted case
# es.best.update(best)
this_evals = es.countevals - base_evals
base_evals = es.countevals
# BIPOP stats update
if irun == 0:
popsize0 = opts['popsize']
maxiter0 = opts['maxiter']
# XXX: This might be a bug? Reproduced from Matlab
# small_i.append(this_evals)
if bipop:
if poptype == 'small':
small_i.append(this_evals)
else: # poptype == 'large'
large_i.append(this_evals)
# final message
if opts['verb_disp']:
es.result_pretty(irun, time.asctime(time.localtime()),
best.f)
irun += 1
# if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \
# if irun > restarts or 'ftarget' in es.stop() \
if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \
or 'maxfevals' in es.stop(check=False):
break
opts['verb_append'] = es.countevals
opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize # TODO: use rather options?
opts['seed'] += 1
# while irun
# es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell
if 1 < 3:
if irun:
es.best.update(best)
# TODO: there should be a better way to communicate the overall best
return es.result() + (es.stop(), es, logger)
else: # previously: to be removed
return (best.x.copy(), best.f, es.countevals,
dict((('stopdict', _CMAStopDict(es._stopdict))
, ('mean', es.gp.pheno(es.mean))
, ('std', es.sigma * es.sigma_vec * sqrt(es.dC) * es.gp.scales)
, ('out', es.out)
, ('opts', es.opts) # last state of options
, ('cma', es)
, ('inputargs', es.inputargs)
))
)
# TODO refine output, can #args be flexible?
# is this well usable as it is now?
else: # except KeyboardInterrupt: # Exception, e:
if eval(str(options['verb_disp'])) > 0:
print(' comment the ``raise`` in the last line of cma.fmin in or out to prevent/restore the KeyboardInterrupt exception')
raise # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit
# _____________________________________________________________________
# _____________________________________________________________________
#
class BaseDataLogger(object):
""""abstract" base class for a data logger that can be used with an `OOOptimizer`
Details: attribute `modulo` is used in ``OOOptimizer.optimize``
"""
def add(self, optim=None, more_data=[]):
"""abstract method, add a "data point" from the state of `optim` into the
logger, the argument `optim` can be omitted if it was `register()`-ed before,
acts like an event handler"""
raise NotImplementedError
def register(self, optim):
"""abstract method, register an optimizer `optim`, only needed if `add()` is
called without a value for the `optim` argument"""
self.optim = optim
def disp(self):
"""display some data trace (not implemented)"""
print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))
def plot(self):
"""plot data (not implemented)"""
print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))
def data(self):
"""return logged data in a dictionary (not implemented)"""
print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))
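# A minimal subclass sketch (illustrative only, not part of the library API;
# it assumes the registered optimizer exposes ``countiter`` and ``best.f`` as
# `CMAEvolutionStrategy` does):
#
#   class ListLogger(BaseDataLogger):
#       def __init__(self):
#           self.rows = []
#       def add(self, optim=None, more_data=[]):
#           es = optim if optim is not None else getattr(self, 'optim', None)
#           if es is not None:
#               self.rows.append((es.countiter, es.best.f) + tuple(more_data))
#       def data(self):
#           return {'iteration_and_fbest': self.rows}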
# _____________________________________________________________________
# _____________________________________________________________________
#
class CMADataLogger(BaseDataLogger):
"""data logger for class `CMAEvolutionStrategy`. The logger is
identified by its name prefix and (over-)writes or reads the corresponding
data files. Therefore, the logger must be considered a *global* variable
with unpredictable side effects, if two loggers with the same name
are used in the same working folder at the same time.
Examples
========
::
import cma
es = cma.CMAEvolutionStrategy(...)
logger = cma.CMADataLogger().register(es)
while not es.stop():
...
logger.add() # add can also take an argument
logger.plot() # or a short cut can be used:
cma.plot() # plot data from logger with default name
logger2 = cma.CMADataLogger('just_another_filename_prefix').load()
logger2.plot()
logger2.disp()
::
import cma
from matplotlib.pylab import *
res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
logger = res[-1] # the CMADataLogger
logger.load() # by "default" data are on disk
semilogy(logger.f[:,0], logger.f[:,5]) # plot f versus iteration, see file header
show()
Details
=======
After loading data, the logger has the attributes `xmean`, `xrecent`,
`std`, `f`, `D` and `corrspec` corresponding to ``xmean``,
``xrecentbest``, ``stddev``, ``fit``, ``axlen`` and ``axlencorr``
filename trails.
:See: `disp()`, `plot()`
"""
default_prefix = 'outcmaes'
# names = ('axlen','fit','stddev','xmean','xrecentbest')
# key_names_with_annotation = ('std', 'xmean', 'xrecent')
def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
"""initialize logging of data from a `CMAEvolutionStrategy`
instance, default ``modulo=1`` means logging with each call
"""
# super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[],
# 'sig':[], 'fit':[], 'xm':[]})
# class properties:
self.name_prefix = name_prefix if name_prefix \
else CMADataLogger.default_prefix
if isinstance(self.name_prefix, CMAEvolutionStrategy):
self.name_prefix = self.name_prefix.opts.eval(
'verb_filenameprefix')
self.file_names = ('axlen', 'axlencorr', 'fit', 'stddev', 'xmean',
'xrecentbest')
"""used in load, however hard-coded in add"""
self.key_names = ('D', 'corrspec', 'f', 'std', 'xmean', 'xrecent')
"""used in load, however hard-coded in plot"""
self._key_names_with_annotation = ('std', 'xmean', 'xrecent')
"""used in load to add one data row to be modified in plot"""
self.modulo = modulo
"""how often to record data, allows calling `add` without args"""
self.append = append
"""append to previous data"""
self.counter = 0
"""number of calls to `add`"""
self.last_iteration = 0
self.registered = False
self.last_correlation_spectrum = None
self._eigen_counter = 1 # reduce costs
def data(self):
"""return dictionary with data.
If data entries are None or incomplete, consider calling
``.load().data()`` to (re-)load the data from files first.
"""
d = {}
for name in self.key_names:
d[name] = self.__dict__.get(name, None)
return d
def register(self, es, append=None, modulo=None):
"""register a `CMAEvolutionStrategy` instance for logging,
``append=True`` appends to previous data logged under the same name,
by default previous data are overwritten.
"""
if not isinstance(es, CMAEvolutionStrategy):
raise TypeError("only class CMAEvolutionStrategy can be " +
"registered for logging")
self.es = es
if append is not None:
self.append = append
if modulo is not None:
self.modulo = modulo
self.registered = True
return self
def initialize(self, modulo=None):
"""reset logger, overwrite original files, `modulo`: log only every modulo call"""
if modulo is not None:
self.modulo = modulo
try:
es = self.es # must have been registered
except AttributeError:
pass # TODO: revise usage of es... that this can pass
raise _Error('call register() before initialize()')
self.counter = 0 # number of calls of add
self.last_iteration = 0 # some lines are only written if iteration>last_iteration
# write headers for output
fn = self.name_prefix + 'fit.dat'
strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
'bestever, best, median, worst objective function value, ' +
'further objective values of best", ' +
strseedtime +
# strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
'\n')
except (IOError, OSError):
print('could not open file ' + fn)
fn = self.name_prefix + 'axlen.dat'
try:
with open(fn, 'w') as f:
f.write('% columns="iteration, evaluation, sigma, ' +
'max axis length, ' +
' min axis length, all principal axes lengths ' +
' (sorted square roots of eigenvalues of C)", ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open/write file ' + fn)
fn = self.name_prefix + 'axlencorr.dat'
try:
with open(fn, 'w') as f:
f.write('% columns="iteration, evaluation, min max(neg(.)) min(pos(.))' +
' max correlation, correlation matrix principal axes lengths ' +
' (sorted square roots of eigenvalues of correlation matrix)", ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open file ' + fn)
fn = self.name_prefix + 'stddev.dat'
try:
with open(fn, 'w') as f:
f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
' stds==sigma*sqrt(diag(C))", ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open file ' + fn)
fn = self.name_prefix + 'xmean.dat'
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
strseedtime)
f.write(' # scaling_of_variables: ')
if np.size(es.gp.scales) > 1:
f.write(' '.join(map(str, es.gp.scales)))
else:
f.write(str(es.gp.scales))
f.write(', typical_x: ')
if np.size(es.gp.typical_x) > 1:
f.write(' '.join(map(str, es.gp.typical_x)))
else:
f.write(str(es.gp.typical_x))
f.write('\n')
except (IOError, OSError):
print('could not open/write file ' + fn)
fn = self.name_prefix + 'xrecentbest.dat'
try:
with open(fn, 'w') as f:
f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open/write file ' + fn)
return self
# end def initialize
def load(self, filenameprefix=None):
"""load (or reload) data from output files, `load()` is called in
`plot()` and `disp()`.
Argument `filenameprefix` is the filename prefix of data to be
loaded (six files), by default ``'outcmaes'``.
Return self with (added) attributes `xrecent`, `xmean`,
`f`, `D`, `std`, 'corrspec'
"""
if not filenameprefix:
filenameprefix = self.name_prefix
assert len(self.file_names) == len(self.key_names)
for i in rglen((self.file_names)):
fn = filenameprefix + self.file_names[i] + '.dat'
try:
self.__dict__[self.key_names[i]] = _fileToMatrix(fn)
except:
_print_warning('reading from file "' + fn + '" failed',
'load', 'CMADataLogger')
try:
if self.key_names[i] in self._key_names_with_annotation:
# copy last row to later fill in annotation position for display
self.__dict__[self.key_names[i]].append(
self.__dict__[self.key_names[i]][-1])
self.__dict__[self.key_names[i]] = \
array(self.__dict__[self.key_names[i]], copy=False)
except:
_print_warning('no data for %s' % fn, 'load',
'CMADataLogger')
return self
def add(self, es=None, more_data=[], modulo=None):
"""append some logging data from `CMAEvolutionStrategy` class instance `es`,
if ``number_of_times_called % modulo`` equals zero, never if ``modulo==0``.
The sequence ``more_data`` must always have the same length.
When used for a different optimizer class, this function can be
(easily?) adapted by changing the assignments under INTERFACE
in the implementation.
"""
mod = modulo if modulo is not None else self.modulo
self.counter += 1
if mod == 0 or (self.counter > 3 and (self.counter - 1) % mod):
return
if es is None:
try:
es = self.es # must have been registered
except AttributeError :
raise _Error('call `add` with argument `es` or ``register(es)`` before ``add()``')
elif not self.registered:
self.register(es)
if 1 < 3:
if self.counter == 1 and not self.append and self.modulo != 0:
self.initialize() # write file headers
self.counter = 1
# --- INTERFACE, can be changed if necessary ---
if not isinstance(es, CMAEvolutionStrategy): # not necessary
_print_warning('type CMAEvolutionStrategy expected, found '
+ str(type(es)), 'add', 'CMADataLogger')
evals = es.countevals
iteration = es.countiter
eigen_decompositions = es.count_eigen
sigma = es.sigma
axratio = es.D.max() / es.D.min()
xmean = es.mean # TODO: should be optionally phenotype?
fmean_noise_free = es.fmean_noise_free
fmean = es.fmean
# TODO: find a different way to communicate current x and f?
try:
besteverf = es.best.f
bestf = es.fit.fit[0]
worstf = es.fit.fit[-1]
medianf = es.fit.fit[es.sp.popsize // 2]
except:
if iteration > 0: # first call without f-values is OK
raise
try:
xrecent = es.best.last.x
except:
xrecent = None
maxD = es.D.max()
minD = es.D.min()
diagD = es.D
diagC = es.sigma * es.sigma_vec * sqrt(es.dC)
more_to_write = es.more_to_write
es.more_to_write = []
# --- end interface ---
try:
# fit
if iteration > self.last_iteration:
fn = self.name_prefix + 'fit.dat'
with open(fn, 'a') as f:
f.write(str(iteration) + ' '
+ str(evals) + ' '
+ str(sigma) + ' '
+ str(axratio) + ' '
+ str(besteverf) + ' '
+ '%.16e' % bestf + ' '
+ str(medianf) + ' '
+ str(worstf) + ' '
# + str(es.sp.popsize) + ' '
# + str(10**es.noiseS) + ' '
# + str(es.sp.cmean) + ' '
+ ' '.join(str(i) for i in more_to_write) + ' '
+ ' '.join(str(i) for i in more_data) + ' '
+ '\n')
# axlen
fn = self.name_prefix + 'axlen.dat'
if 1 < 3:
with open(fn, 'a') as f: # does not rely on reference counting
f.write(str(iteration) + ' '
+ str(evals) + ' '
+ str(sigma) + ' '
+ str(maxD) + ' '
+ str(minD) + ' '
+ ' '.join(map(str, diagD))
+ '\n')
# correlation matrix eigenvalues
if 1 < 3:
fn = self.name_prefix + 'axlencorr.dat'
c = es.correlation_matrix()
if c is not None:
# accept at most 50% internal loss
if self._eigen_counter < eigen_decompositions / 2:
self.last_correlation_spectrum = \
sorted(es.opts['CMA_eigenmethod'](c)[0]**0.5)
self._eigen_counter += 1
if self.last_correlation_spectrum is None:
self.last_correlation_spectrum = len(diagD) * [1]
c = c[c < 1 - 1e-14] # remove diagonal elements
c[c > 1 - 1e-14] = 1 - 1e-14
c[c < -1 + 1e-14] = -1 + 1e-14
c_min = np.min(c)
c_max = np.max(c)
if np.min(abs(c)) == 0:
c_medminus = 0 # thereby zero "is negative"
c_medplus = 0 # thereby zero "is positive"
else:
c_medminus = c[np.argmin(1/c)] # c is flat
c_medplus = c[np.argmax(1/c)] # c is flat
with open(fn, 'a') as f:
f.write(str(iteration) + ' '
+ str(evals) + ' '
+ str(c_min) + ' '
+ str(c_medminus) + ' ' # the one closest to 0
+ str(c_medplus) + ' ' # the one closest to 0
+ str(c_max) + ' '
+ ' '.join(map(str,
self.last_correlation_spectrum))
+ '\n')
# stddev
fn = self.name_prefix + 'stddev.dat'
with open(fn, 'a') as f:
f.write(str(iteration) + ' '
+ str(evals) + ' '
+ str(sigma) + ' '
+ '0 0 '
+ ' '.join(map(str, diagC))
+ '\n')
# xmean
fn = self.name_prefix + 'xmean.dat'
with open(fn, 'a') as f:
f.write(str(iteration) + ' '
+ str(evals) + ' '
# + str(sigma) + ' '
+ '0 '
+ str(fmean_noise_free) + ' '
+ str(fmean) + ' ' # TODO: this does not make sense
# TODO should be optional the phenotyp?
+ ' '.join(map(str, xmean))
+ '\n')
# xrecent
fn = self.name_prefix + 'xrecentbest.dat'
if iteration > 0 and xrecent is not None:
with open(fn, 'a') as f:
f.write(str(iteration) + ' '
+ str(evals) + ' '
+ str(sigma) + ' '
+ '0 '
+ str(bestf) + ' '
+ ' '.join(map(str, xrecent))
+ '\n')
except (IOError, OSError):
if iteration <= 1:
_print_warning(('could not open/write file %s: ' % fn,
sys.exc_info()))
self.last_iteration = iteration
def closefig(self):
pyplot.close(self.fighandle)
def save_to(self, nameprefix, switch=False):
"""saves logger data to a different set of files, for
``switch=True`` also the logger's name prefix is switched to
the new value
"""
if not nameprefix or not isinstance(nameprefix, basestring):
raise _Error('filename prefix must be a nonempty string')
if nameprefix == self.default_prefix:
raise _Error('cannot save to default name "' + nameprefix + '...", choose another name')
if nameprefix == self.name_prefix:
return
for name in self.file_names:
open(nameprefix + name + '.dat', 'w').write(open(self.name_prefix + name + '.dat').read())
if switch:
self.name_prefix = nameprefix
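# Usage sketch (the file prefix is an illustrative assumption): copy the six
# data files under a new prefix; pass ``switch=True`` to also make the logger
# use the new prefix from then on,
#   logger = CMADataLogger()           # default prefix 'outcmaes'
#   logger.save_to('myrun_')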
def select_data(self, iteration_indices):
"""keep only data of `iteration_indices`"""
dat = self
iteridx = iteration_indices
dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]
dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]
try:
iteridx = list(iteridx)
iteridx.append(iteridx[-1]) # last entry is artificial
except:
pass
dat.std = dat.std[np.where([x in iteridx
for x in dat.std[:, 0]])[0], :]
dat.xmean = dat.xmean[np.where([x in iteridx
for x in dat.xmean[:, 0]])[0], :]
try:
dat.xrecent = dat.x[np.where([x in iteridx for x in
dat.xrecent[:, 0]])[0], :]
except AttributeError:
pass
try:
dat.corrspec = dat.x[np.where([x in iteridx for x in
dat.corrspec[:, 0]])[0], :]
except AttributeError:
pass
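# Usage sketch (illustrative): thin out the data to every 20th recorded
# iteration before plotting,
#   logger = CMADataLogger().load()
#   logger.select_data(logger.f[::20, 0])
#   logger.plot()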
def plot(self, fig=None, iabscissa=1, iteridx=None,
plot_mean=False, # was: plot_mean=True
foffset=1e-19, x_opt=None, fontsize=9):
"""plot data from a `CMADataLogger` (using the files written
by the logger).
Arguments
---------
`fig`
figure number, by default 325
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot()
cma.savefig('fig325.png') # save current figure
logger.closefig()
Dependencies: matplotlib/pyplot.
"""
try:
# pyplot: procedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, hold, gcf
except ImportError:
print('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 325
if iabscissa not in (0, 1):
iabscissa = 1
self.load() # better load only conditionally?
dat = self
dat.x = dat.xmean # this is the genotyp
if not plot_mean:
if len(dat.x) < 2:
print('not enough data to plot recent x')
else:
dat.x = dat.xrecent
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) <= 1:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where
# dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
subplot(2, 2, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# Scaling
subplot(2, 2, 3)
self.plot_axes_scaling(iabscissa)
# spectrum of correlation matrix
figure(fig)
subplot(2, 2, 2)
if plot_mean:
self.plot_mean(iabscissa, x_opt)
else:
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
# pyplot.xticks(xticklocs)
# standard deviations
subplot(2, 2, 4)
self.plot_stds(iabscissa)
self._finalize_plotting()
return self
def plot_all(self, fig=None, iabscissa=1, iteridx=None,
foffset=1e-19, x_opt=None, fontsize=9):
"""
plot data from a `CMADataLogger` (using the files written by the logger).
Arguments
---------
`fig`
figure number, by default 426
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot_all()
cma.savefig('fig425.png') # save current figure
logger.closefig()
Dependencies: matplotlib/pyplot.
"""
try:
# pyplot: procedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, gcf
except ImportError:
print('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 426
if iabscissa not in (0, 1):
iabscissa = 1
self.load()
dat = self
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) == 0:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
if 1 < 3:
subplot(2, 3, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# standard deviations
subplot(2, 3, 4)
self.plot_stds(iabscissa)
# Scaling
subplot(2, 3, 2)
self.plot_axes_scaling(iabscissa)
pyplot.xlabel('')
# spectrum of correlation matrix
subplot(2, 3, 5)
self.plot_correlations(iabscissa)
# x-vectors
subplot(2, 3, 3)
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
subplot(2, 3, 6)
self.plot_mean(iabscissa, x_opt)
self._finalize_plotting()
return self
def plot_axes_scaling(self, iabscissa=1):
if not hasattr(self, 'D'):
self.load()
dat = self
self._enter_plotting()
pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
pyplot.hold(True)
pyplot.grid(True)
ax = array(pyplot.axis())
# ax[1] = max(minxend, ax[1])
pyplot.axis(ax)
pyplot.title('Principal Axes Lengths')
# pyplot.xticks(xticklocs)
self._xlabel(iabscissa)
self._finalize_plotting()
return self
def plot_stds(self, iabscissa=1):
if not hasattr(self, 'std'):
self.load()
dat = self
self._enter_plotting()
# remove sigma from stds (graphs become much better readable)
dat.std[:, 5:] = np.transpose(dat.std[:, 5:].T / dat.std[:, 2].T)
# ax = array(pyplot.axis())
# ax[1] = max(minxend, ax[1])
# axis(ax)
if 1 < 2 and dat.std.shape[1] < 100:
# use fake last entry in x and std for line extension-annotation
minxend = int(1.06 * dat.std[-2, iabscissa])
# minxend = int(1.06 * dat.x[-2, iabscissa])
dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
idx = np.argsort(dat.std[-2, 5:])
idx2 = np.argsort(idx)
dat.std[-1, 5 + idx] = np.logspace(np.log10(np.min(dat.std[:, 5:])),
np.log10(np.max(dat.std[:, 5:])), dat.std.shape[1] - 5)
dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
pyplot.hold(True)
ax = array(pyplot.axis())
yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1] - 5)
# yyl = np.sort(dat.std[-1,5:])
idx = np.argsort(dat.std[-1, 5:])
idx2 = np.argsort(idx)
# plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
# vertical separator
pyplot.plot(np.dot(dat.std[-2, iabscissa], [1, 1]),
array([ax[2] + 1e-6, ax[3] - 1e-6]),
# array([np.min(dat.std[:, 5:]), np.max(dat.std[:, 5:])]),
'k-')
pyplot.hold(True)
# plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
for i in rglen((idx)):
# text(ax[1], yy[i], ' '+str(idx[i]))
pyplot.text(dat.std[-1, iabscissa], dat.std[-1, 5 + i], ' ' + str(i))
else:
pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
pyplot.hold(True)
pyplot.grid(True)
pyplot.title(r'Standard Deviations $\times$ $\sigma^{-1}$ in All Coordinates')
# pyplot.xticks(xticklocs)
self._xlabel(iabscissa)
self._finalize_plotting()
return self
def plot_mean(self, iabscissa=1, x_opt=None, annotations=None):
if not hasattr(self, 'xmean'):
self.load()
self.x = self.xmean
self._plot_x(iabscissa, x_opt, 'mean', annotations=annotations)
self._xlabel(iabscissa)
return self
def plot_xrecent(self, iabscissa=1, x_opt=None, annotations=None):
if not hasattr(self, 'xrecent'):
self.load()
self.x = self.xrecent
self._plot_x(iabscissa, x_opt, 'curr best', annotations=annotations)
self._xlabel(iabscissa)
return self
def plot_correlations(self, iabscissa=1):
"""spectrum of correlation matrix and largest correlation"""
if not hasattr(self, 'corrspec'):
self.load()
if len(self.corrspec) < 2:
return self
x = self.corrspec[:, iabscissa]
y = self.corrspec[:, 6:] # principal axes
ys = self.corrspec[:, :6] # "special" values
from matplotlib.pyplot import semilogy, hold, text, grid, axis, title
self._enter_plotting()
semilogy(x, y, '-c')
hold(True)
semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
if ys is not None:
semilogy(x, 1 + ys[:, 2], '-b')
text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
semilogy(x, 1 - ys[:, 5], '-b')
text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
semilogy(x[:], 1 + ys[:, 3], '-k')
text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
semilogy(x[:], 1 - ys[:, 4], '-k')
text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
grid(True)
ax = array(axis())
# ax[1] = max(minxend, ax[1])
axis(ax)
title('Spectrum (roots) of correlation matrix')
# pyplot.xticks(xticklocs)
self._xlabel(iabscissa)
self._finalize_plotting()
return self
def plot_divers(self, iabscissa=1, foffset=1e-19):
"""plot fitness, sigma, axis ratio...
:param iabscissa: 0 means vs iterations, 1 means vs evaluations
:param foffset: added to f-value
:See: `plot()`
"""
from matplotlib.pyplot import semilogy, hold, grid, \
axis, title, text
fontsize = pyplot.rcParams['font.size']
if not hasattr(self, 'f'):
self.load()
dat = self
minfit = min(dat.f[:, 5])
dfit = dat.f[:, 5] - minfit # why not using idx?
dfit[dfit < 1e-98] = np.NaN
self._enter_plotting()
if dat.f.shape[1] > 7:
# semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k')
hold(True)
# (larger indices): additional fitness data, for example constraints values
if dat.f.shape[1] > 8:
# dd = abs(dat.f[:,7:]) + 10*foffset
# dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'y')
hold(True)
idx = np.where(dat.f[:, 5] > 1e-98)[0] # positive values
semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b')
hold(True)
grid(True)
semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b')
text(dat.f[-1, iabscissa], abs(dat.f[-1, 5]) + foffset,
r'$|f_\mathsf{best}|$', fontsize=fontsize + 2)
# negative f-values, dots
sgn = np.sign(dat.f[:, 5])
sgn[np.abs(dat.f[:, 5]) < 1e-98] = 0
idx = np.where(sgn < 0)[0]
semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
'.m') # , markersize=5
# lines between negative f-values
dsgn = np.diff(sgn)
start_idx = 1 + np.where((dsgn < 0) * (sgn[1:] < 0))[0]
stop_idx = 1 + np.where(dsgn > 0)[0]
if sgn[0] < 0:
start_idx = np.concatenate(([0], start_idx))
for istart in start_idx:
istop = stop_idx[stop_idx > istart]
istop = istop[0] if len(istop) else 0
idx = xrange(istart, istop if istop else dat.f.shape[0])
if len(idx) > 1:
semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
'm') # , markersize=5
# lines between positive and negative f-values
# TODO: the following might plot values very close to zero
if istart > 0: # line to the left of istart
semilogy(dat.f[istart-1:istart+1, iabscissa],
abs(dat.f[istart-1:istart+1, 5]) +
foffset, '--m')
if istop: # line to the left of istop
semilogy(dat.f[istop-1:istop+1, iabscissa],
abs(dat.f[istop-1:istop+1, 5]) +
foffset, '--m')
# mark the respective first positive values
semilogy(dat.f[istop, iabscissa], abs(dat.f[istop, 5]) +
foffset, '.b', markersize=7)
# mark the respective first negative values
semilogy(dat.f[istart, iabscissa], abs(dat.f[istart, 5]) +
foffset, '.r', markersize=7)
# standard deviations std
semilogy(dat.std[:-1, iabscissa],
np.vstack([list(map(max, dat.std[:-1, 5:])),
list(map(min, dat.std[:-1, 5:]))]).T,
'-m', linewidth=2)
text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std',
fontsize=fontsize)
text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std',
fontsize=fontsize)
# delta-fitness in cyan
idx = isfinite(dfit)
if 1 < 3:
idx_nan = np.where(idx == False)[0] # gaps
if not len(idx_nan): # should never happen
semilogy(dat.f[:, iabscissa][idx], dfit[idx], '-c')
else:
i_start = 0
for i_end in idx_nan:
if i_end > i_start:
semilogy(dat.f[:, iabscissa][i_start:i_end],
dfit[i_start:i_end], '-c')
i_start = i_end + 1
if len(dfit) > idx_nan[-1] + 1:
semilogy(dat.f[:, iabscissa][idx_nan[-1]+1:],
dfit[idx_nan[-1]+1:], '-c')
text(dat.f[idx, iabscissa][-1], dfit[idx][-1],
r'$f_\mathsf{best} - \min(f)$', fontsize=fontsize + 2)
# overall minimum
i = np.argmin(dat.f[:, 5])
semilogy(dat.f[i, iabscissa], np.abs(dat.f[i, 5]), 'ro',
markersize=9)
semilogy(dat.f[i, iabscissa], dfit[idx][np.argmin(dfit[idx])]
+ 1e-98, 'ro', markersize=9)
# semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
# AR and sigma
semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r') # AR
semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g') # sigma
text(dat.f[-1, iabscissa], dat.f[-1, 3], r'axis ratio',
fontsize=fontsize)
text(dat.f[-1, iabscissa], dat.f[-1, 2] / 1.5, r'$\sigma$',
fontsize=fontsize+3)
ax = array(axis())
# ax[1] = max(minxend, ax[1])
axis(ax)
text(ax[0] + 0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
'.min($f$)=' + repr(minfit))
#'.f_recent=' + repr(dat.f[-1, 5]))
# title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
# title(r'blue:$\mathrm{abs}(f)$, cyan:$f - \min(f)$, green:$\sigma$, red:axis ratio',
# fontsize=fontsize - 0.0)
title(r'$|f_{\mathrm{best},\mathrm{med},\mathrm{worst}}|$, $f - \min(f)$, $\sigma$, axis ratio')
# if __name__ != 'cma': # should be handled by the caller
self._xlabel(iabscissa)
self._finalize_plotting()
return self
def _enter_plotting(self, fontsize=9):
"""assumes that a figure is open """
# interactive_status = matplotlib.is_interactive()
self.original_fontsize = pyplot.rcParams['font.size']
pyplot.rcParams['font.size'] = fontsize
pyplot.hold(False) # opens a figure window, if none exists
pyplot.ioff()
def _finalize_plotting(self):
pyplot.ion()
pyplot.draw() # update "screen"
pyplot.show() # show figure
# matplotlib.interactive(interactive_status)
pyplot.rcParams['font.size'] = self.original_fontsize
def _xlabel(self, iabscissa=1):
pyplot.xlabel('iterations' if iabscissa == 0
else 'function evaluations')
def _plot_x(self, iabscissa=1, x_opt=None, remark=None,
annotations=None):
"""If ``x_opt is not None`` the difference to x_opt is plotted
in log scale
"""
if not hasattr(self, 'x'):
_print_warning('no x-attribute found, use methods ' +
'plot_xrecent or plot_mean', 'plot_x',
'CMADataLogger')
return
from matplotlib.pyplot import plot, semilogy, hold, text, grid, axis, title
dat = self # for convenience and historical reasons
# modify fake last entry in x for line extension-annotation
if dat.x.shape[1] < 100:
minxend = int(1.06 * dat.x[-2, iabscissa])
# write y-values for individual annotation into dat.x
dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
if x_opt is None:
idx = np.argsort(dat.x[-2, 5:])
idx2 = np.argsort(idx)
dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
else: # y-axis is in log
xdat = np.abs(dat.x[:, 5:] - np.array(x_opt, copy=False))
idx = np.argsort(xdat[-2, :])
idx2 = np.argsort(idx)
xdat[-1, idx] = np.logspace(np.log10(np.min(abs(xdat[xdat!=0]))),
np.log10(np.max(np.abs(xdat))),
dat.x.shape[1] - 5)
else:
minxend = 0
self._enter_plotting()
if x_opt is not None: # TODO: differentate neg and pos?
semilogy(dat.x[:, iabscissa], abs(xdat), '-')
else:
plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
hold(True)
grid(True)
ax = array(axis())
# ax[1] = max(minxend, ax[1])
axis(ax)
ax[1] -= 1e-6 # to prevent last x-tick annotation, probably superfluous
if dat.x.shape[1] < 100:
yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
# yyl = np.sort(dat.x[-1,5:])
if x_opt is not None:
# semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point
semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]),
array([ax[2] * (1+1e-6), ax[3] / (1+1e-6)]), 'k-')
else:
# plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
plot(np.dot(dat.x[-2, iabscissa], [1, 1]),
array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
# plot(array([dat.x[-1, iabscissa], ax[1]]),
# reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
for i in rglen(idx):
# TODOqqq: annotate phenotypic value!?
# text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
text(dat.x[-1, iabscissa], dat.x[-1, 5 + i]
if x_opt is None else np.abs(xdat[-1, i]),
('x(' + str(i) + ')=' if annotations is None
else str(i) + ':' + annotations[i] + "=")
+ str(dat.x[-2, 5 + i]))
i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
i += 1
title('Object Variables (' +
(remark + ', ' if remark is not None else '') +
str(dat.x.shape[1] - 5) + '-D, popsize~' +
(str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
+ ')')
self._finalize_plotting()
def downsampling(self, factor=10, first=3, switch=True, verbose=True):
"""
crude downsampling of a `CMADataLogger` data file by `factor`,
keeping also the first `first` entries. This function is a
stump and subject to future changes. Return self.
Arguments
---------
- `factor` -- downsampling factor
- `first` -- keep first `first` entries
- `switch` -- switch the new logger to the downsampled logger
original_name+'down'
Details
-------
``self.name_prefix+'down'`` files are written
Example
-------
::
import cma
cma.downsampling() # takes outcmaes* files
cma.plot('outcmaesdown')
"""
newprefix = self.name_prefix + 'down'
for name in self.file_names:
f = open(newprefix + name + '.dat', 'w')
iline = 0
cwritten = 0
for line in open(self.name_prefix + name + '.dat'):
if iline < first or iline % factor == 0:
f.write(line)
cwritten += 1
iline += 1
f.close()
if verbose and iline > first:
print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
if switch:
self.name_prefix += 'down'
return self
# ____________________________________________________________
# ____________________________________________________________
#
def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
"""displays selected data from (files written by) the class `CMADataLogger`.
Arguments
---------
`idx`
indices corresponding to rows in the data file;
if idx is a scalar (int), the first two, then every idx-th,
and the last three rows are displayed. Too large index values are removed.
Example
-------
>>> import cma, numpy as np
>>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data
>>> assert res[1] < 1e-9
>>> assert res[2] < 4400
>>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data
>>> l.disp([0,-1]) # first and last
>>> l.disp(20) # some first/last and every 20-th line
>>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
>>> l.disp(np.r_[0, -10:0]) # first and ten last
>>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...)
Details
-------
The data line with the best f-value is displayed as last line.
:See: `disp()`
"""
filenameprefix = self.name_prefix
def printdatarow(dat, iteration):
"""print data of iteration i"""
i = np.where(dat.f[:, 0] == iteration)[0][0]
j = np.where(dat.std[:, 0] == iteration)[0][0]
print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) +
' %5.1e' % (dat.f[i, 3]) +
' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:]))
dat = CMADataLogger(filenameprefix).load()
ndata = dat.f.shape[0]
# map index to iteration number, is difficult if not all iteration numbers exist
# idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
# otherwise:
if idx is None:
idx = 100
if isscalar(idx):
# idx = np.arange(0, ndata, idx)
if idx:
idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0]
else:
idx = np.r_[0, 1, -3:0]
idx = array(idx)
idx = idx[idx < ndata]
idx = idx[-idx <= ndata]
iters = dat.f[idx, 0]
idxbest = np.argmin(dat.f[:, 5])
iterbest = dat.f[idxbest, 0]
if len(iters) == 1:
printdatarow(dat, iters[0])
else:
self.disp_header()
for i in iters:
printdatarow(dat, i)
self.disp_header()
printdatarow(dat, iterbest)
sys.stdout.flush()
def disp_header(self):
heading = 'Iterat Nfevals function value axis ratio maxstd minstd'
print(heading)
# end class CMADataLogger
# ____________________________________________________________
# ____________________________________________________________
#
last_figure_number = 324
def plot(name=None, fig=None, abscissa=1, iteridx=None,
plot_mean=False,
foffset=1e-19, x_opt=None, fontsize=9):
"""
plot data from files written by a `CMADataLogger`,
the call ``cma.plot(name, **argsdict)`` is a shortcut for
``cma.CMADataLogger(name).plot(**argsdict)``
Arguments
---------
`name`
name of the logger, filename prefix, None evaluates to
the default 'outcmaes'
`fig`
filename or figure number, or both as a tuple (any order)
`abscissa`
0==plot versus iteration count,
1==plot versus function evaluation number
`iteridx`
iteration indices to plot
Return `None`
Examples
--------
::
cma.plot(); # the optimization might be still
# running in a different shell
cma.savefig('fig325.png')
cma.closefig()
cdl = cma.CMADataLogger().downsampling().plot()
# in case the file sizes are large
Details
-------
Data from codes in other languages (C, Java, Matlab, Scilab) have the same
format and can be plotted just the same.
:See: `CMADataLogger`, `CMADataLogger.plot()`
"""
global last_figure_number
if not fig:
last_figure_number += 1
fig = last_figure_number
if isinstance(fig, (int, float)):
last_figure_number = fig
CMADataLogger(name).plot(fig, abscissa, iteridx, plot_mean, foffset,
x_opt, fontsize)
def disp(name=None, idx=None):
"""displays selected data from (files written by) the class `CMADataLogger`.
The call ``cma.disp(name, idx)`` is a shortcut for ``cma.CMADataLogger(name).disp(idx)``.
Arguments
---------
`name`
name of the logger, filename prefix, `None` evaluates to
the default ``'outcmaes'``
`idx`
indices corresponding to rows in the data file; by
default the first five, then every 100-th, and the last
10 rows. Too large index values are removed.
Examples
--------
::
import cma, numpy
# assume some data are available from previous runs
cma.disp(None,numpy.r_[0,-1]) # first and last
cma.disp(None,numpy.r_[0:1e9:100,-1]) # every 100-th and last
cma.disp(idx=numpy.r_[0,-10:0]) # first and ten last
cma.disp(idx=numpy.r_[0:1e9:1e3,-10:0])
:See: `CMADataLogger.disp()`
"""
return CMADataLogger(name if name else CMADataLogger.default_prefix
).disp(idx)
# ____________________________________________________________
def _fileToMatrix(file_name):
"""rudimentary method to read in data from a file"""
# TODO: np.loadtxt() might be an alternative
# try:
if 1 < 3:
lres = []
for line in open(file_name, 'r').readlines():
if len(line) > 0 and line[0] not in ('%', '#'):
lres.append(list(map(float, line.split())))
res = lres
while res != [] and res[0] == []: # remove further leading empty lines
del res[0]
return res
# except:
print('could not read file ' + file_name)
# ____________________________________________________________
# ____________________________________________________________
class NoiseHandler(object):
"""Noise handling according to [Hansen et al 2009, A Method for
Handling Uncertainty in Evolutionary Optimization...]
The interface of this class is still provisional and subject to change.
The noise handling follows closely [Hansen et al 2009] in the
measurement part, but the implemented treatment is slightly
different: for ``noiseS > 0``, ``evaluations`` (time) and sigma are
increased by ``alpha``. For ``noiseS < 0``, ``evaluations`` (time)
is decreased by ``alpha**(1/4)``.
The (second) parameter ``evaluations`` defines the maximal number
of evaluations for a single fitness computation. If it is a list,
the smallest element defines the minimal number and if the list has
three elements, the median value is the start value for
``evaluations``.
``NoiseHandler`` serves to control the noise via steps-size
increase and number of re-evaluations, for example via ``fmin`` or
with ``ask_and_eval()``.
Examples
--------
Minimal example together with `fmin` on a non-noisy function:
>>> import cma
>>> cma.fmin(cma.felli, 7 * [1], 1, noise_handler=cma.NoiseHandler(7))
in dimension 7 (which needs to be given twice). A more verbose example
in the optimization loop with a noisy function defined in ``func``:
>>> import cma, numpy as np
>>> func = lambda x: cma.fcts.sphere(x) * (1 + 4 * np.random.randn() / len(x)) # cma.Fcts.noisysphere
>>> es = cma.CMAEvolutionStrategy(np.ones(10), 1)
>>> nh = cma.NoiseHandler(es.N, maxevals=[1, 1, 30])
>>> while not es.stop():
... X, fit_vals = es.ask_and_eval(func, evaluations=nh.evaluations)
... es.tell(X, fit_vals) # prepare for next iteration
... es.sigma *= nh(X, fit_vals, func, es.ask) # see method __call__
... es.countevals += nh.evaluations_just_done # this is a hack, not important though
... es.logger.add(more_data = [nh.evaluations, nh.noiseS]) # add a data point
... es.disp()
... # nh.maxevals = ... it might be useful to start with smaller values and then increase
>>> print(es.stop())
>>> print(es.result()[-2]) # take mean value, the best solution is totally off
>>> assert sum(es.result()[-2]**2) < 1e-9
>>> print(X[np.argmin(fit_vals)]) # not bad, but probably worse than the mean
>>> # es.logger.plot()
The command ``logger.plot()`` will plot the logged data.
The noise options of `fmin()` control a `NoiseHandler` instance
similar to this example. The command ``cma.CMAOptions('noise')``
lists in effect the parameters of `__init__` apart from
``aggregate``.
Details
-------
The parameters reevals, theta, c_s, and alpha_t are set differently
than in the original publication, see method `__init__()`. For a
very small population size, say popsize <= 5, the measurement
technique based on rank changes is likely to fail.
Missing Features
----------------
In case no noise is found, ``self.lam_reeval`` should be adaptive
and get at least as low as 1 (however the possible savings from this
are rather limited). Another option might be to decide during the
first call by a quantitative analysis of fitness values whether
``lam_reeval`` is set to zero. More generally, an automatic noise
mode detection might also set the covariance matrix learning rates
to smaller values.
:See: `fmin()`, `CMAEvolutionStrategy.ask_and_eval()`
"""
# TODO: for const additive noise a better version might be with alphasigma also used for sigma-increment,
# while all other variance changing sources are removed (because they are intrinsically biased). Then
# using kappa to get convergence (with unit sphere samples): noiseS=0 leads to a certain kappa increasing rate?
def __init__(self, N, maxevals=[1, 1, 1], aggregate=np.median,
reevals=None, epsilon=1e-7, parallel=False):
"""parameters are
`N`
dimension, (only) necessary to adjust the internal
"alpha"-parameters
`maxevals`
maximal value for ``self.evaluations``, where
``self.evaluations`` function calls are aggregated for
noise treatment. With ``maxevals == 0`` the noise
handler is (temporarily) "switched off". If `maxevals`
is a list, min value and (for >2 elements) median are
used to define minimal and initial value of
``self.evaluations``. Choosing ``maxevals > 1`` is only
reasonable if the original ``fit`` values (that
are passed to `__call__`) are also computed by aggregation of
``self.evaluations`` values (otherwise the values are
not comparable), as is done within `fmin()`.
`aggregate`
function to aggregate single f-values to a 'fitness', e.g.
``np.median``.
`reevals`
number of solutions to be reevaluated for noise
measurement, can be a float, by default set to ``2 +
popsize/20``, where ``popsize = len(fit)`` in
``__call__``. zero switches noise handling off.
`epsilon`
multiplier for perturbation of the reevaluated solutions
`parallel`
a single f-call with all resampled solutions
:See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy.ask_and_eval()`
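A small construction sketch (the parameter values are illustrative,
not recommendations)::
import cma
nh = cma.NoiseHandler(10, maxevals=[1, 3, 30], reevals=4)
# pass nh as ``noise_handler`` argument to `fmin()` or use it
# explicitly in a loop as in the class docstring above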
"""
self.lam_reeval = reevals # 2 + popsize/20, see method indices(), originally 2 + popsize/10
self.epsilon = epsilon
self.parallel = parallel
## meta_parameters.noise_theta == 0.5
self.theta = 0.5 # 0.5 # originally 0.2
self.cum = 0.3 # originally 1, 0.3 allows one disagreement of current point with resulting noiseS
## meta_parameters.noise_alphasigma == 2.0
self.alphasigma = 1 + 2.0 / (N + 10) # 2, unit sphere sampling: 1 + 1 / (N + 10)
## meta_parameters.noise_alphaevals == 2.0
self.alphaevals = 1 + 2.0 / (N + 10) # 2, originally 1.5
## meta_parameters.noise_alphaevalsdown_exponent == -0.25
self.alphaevalsdown = self.alphaevals** -0.25 # originally 1/1.5
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
self.evaluations = 1 # to aggregate for a single f-evaluation
self.minevals = 1
self.maxevals = int(np.max(maxevals))
if hasattr(maxevals, '__contains__'): # i.e. can deal with ``in``
if len(maxevals) > 1:
self.minevals = min(maxevals)
self.evaluations = self.minevals
if len(maxevals) > 2:
self.evaluations = np.median(maxevals)
## meta_parameters.noise_aggregate == None
self.f_aggregate = aggregate  # the former ``if not None`` condition was always true, hence the else branch ({1: np.median, 2: np.mean}[None]) was dead code
self.evaluations_just_done = 0 # actually conducted evals, only for documentation
self.noiseS = 0
def __call__(self, X, fit, func, ask=None, args=()):
"""proceed with noise measurement, set anew attributes ``evaluations``
(proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
and return a factor for increasing sigma.
Parameters
----------
`X`
a list/sequence/vector of solutions
`fit`
the respective list of function values
`func`
the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)``
`ask`
a method to generate a new, slightly disturbed solution. The argument
is (only) mandatory if ``epsilon`` is not zero, see `__init__()`.
`args`
optional additional arguments to `func`
Details
-------
Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order.
``self.evaluations`` is adapted within the method `treat()`.
"""
self.evaluations_just_done = 0
if not self.maxevals or self.lam_reeval == 0:
return 1.0
res = self.reeval(X, fit, func, ask, args)
if not len(res):
return 1.0
self.update_measure()
return self.treat()
def get_evaluations(self):
"""return ``self.evaluations``, the number of evaluations to get a single fitness measurement"""
return self.evaluations
def treat(self):
"""adapt self.evaluations depending on the current measurement value
and return ``sigma_fac in (1.0, self.alphasigma)``
"""
if self.noiseS > 0:
self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))
return self.alphasigma
else:
self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))
return 1.0 # / self.alphasigma
def reeval(self, X, fit, func, ask, args=()):
"""store two fitness lists, `fit` and ``fitre`` reevaluating some
solutions in `X`.
``self.evaluations`` evaluations are done for each reevaluated
fitness value.
See `__call__()`, where `reeval()` is called.
"""
self.fit = list(fit)
self.fitre = list(fit)
self.idx = self.indices(fit)
if not len(self.idx):
return self.idx
evals = int(self.evaluations) if self.f_aggregate else 1
fagg = np.median if self.f_aggregate is None else self.f_aggregate
for i in self.idx:
X_i = X[i]
if self.epsilon:
if self.parallel:
self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
else:
self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
for _k in xrange(evals)])
else:
self.fitre[i] = fagg([func(X_i, *args) for _k in xrange(evals)])
self.evaluations_just_done = evals * len(self.idx)
return self.fit, self.fitre, self.idx
def update_measure(self):
"""update the noise level measure using the two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that `self.idx` contains the indices where the fitness
lists differ
"""
lam = len(self.fit)
idx = np.argsort(self.fit + self.fitre)
ranks = np.argsort(idx).reshape((2, lam))
rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
# compute rank change limits using both ranks[0] and ranks[1]
r = np.arange(1, 2 * lam) # 2 * lam - 2 elements
limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
self.theta * 50) +
Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
self.theta * 50))
for i in self.idx]
# compute measurement
# max: 1 rankchange in 2*lambda is always fine
s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
self.noiseS += self.cum * (np.mean(s) - self.noiseS)
return self.noiseS, s
def indices(self, fit):
"""return the set of indices to be reevaluated for noise
measurement.
Given that the first values are the earliest, this is also a useful policy
with a time-changing objective.
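Example sketch of the default policy (``choice == 1``): with
``lam_reev == 3``, the first two solutions and the single best of
the remaining solutions are selected for reevaluation.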
"""
## meta_parameters.noise_reeval_multiplier == 1.0
lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval
else 2 + len(fit) / 20)
lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand())
## meta_parameters.noise_choose_reeval == 1
choice = 1
if choice == 1:
# take n_first first and reev - n_first best of the remaining
n_first = lam_reev - lam_reev // 2
sort_idx = np.argsort(array(fit, copy=False)[n_first:]) + n_first
return np.array(list(range(0, n_first)) +
list(sort_idx[0:lam_reev - n_first]), copy=False)
elif choice == 2:
idx_sorted = np.argsort(array(fit, copy=False))
# take lam_reev equally spaced, starting with best
linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev)
return idx_sorted[[int(i) for i in linsp]]
# take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.
elif choice == 3:
return np.argsort(array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev]
else:
raise ValueError('unrecognized choice value %d for noise reev'
% choice)
# ____________________________________________________________
# ____________________________________________________________
class Sections(object):
"""plot sections through an objective function.
A rational first step when facing an (expensive)
application. By default 6 points in each coordinate are evaluated.
This class is still experimental.
Examples
--------
>>> import cma, numpy as np
>>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False)
>>> s.do(plot=False) # evaluate the same points again, i.e. check for noise
>> try:
... s.plot()
... except:
... print('plotting failed: matplotlib.pyplot package missing?')
Details
-------
Data are saved after each function call during `do()`. The filename
is attribute ``name`` and by default ``str(func)``, see `__init__()`.
A random (orthogonal) basis can be generated with
``cma.Rotation()(np.eye(3))``.
CAVEAT: The default name depends only on the function name, but it
should be unique with respect to all parameters of `__init__()` except
`plot_cmd` and `load`. If, for example, a different basis is chosen, either
the name must be changed or the ``.pkl`` file containing the
previous data must first be renamed or deleted.
``s.res`` is a dictionary with an entry for each "coordinate" ``i``
and with an entry ``'x'``, the middle point. Each entry ``i`` is
again a dictionary with keys being different dx values and the
value being a sequence of f-values. For example ``s.res[2][0.1] ==
[0.01, 0.01]``, which is generated using the difference vector ``s
.basis[2]`` like
``s.res[2][dx] += func(s.res['x'] + dx * s.basis[2])``.
:See: `__init__()`
"""
def __init__(self, func, x, args=(), basis=None, name=None,
plot_cmd=pyplot.plot if pyplot else None, load=True):
"""
Parameters
----------
`func`
objective function
`x`
point in search space, middle point of the sections
`args`
arguments passed to `func`
`basis`
evaluated points are ``func(x + locations[j] * basis[i])
for i in len(basis) for j in len(locations)``,
see `do()`
`name`
filename where to save the result
`plot_cmd`
command used to plot the data, typically matplotlib pyplots `plot` or `semilogy`
`load`
load previous data from file ``str(func) + '.pkl'``
"""
self.func = func
self.args = args
self.x = x
self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
self.plot_cmd = plot_cmd # or semilogy
self.basis = np.eye(len(x)) if basis is None else basis
try:
self.load()
if any(self.res['x'] != x):
self.res = {}
self.res['x'] = x # TODO: res['x'] does not look perfect
else:
print(self.name + ' loaded')
except:
self.res = {}
self.res['x'] = x
def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
"""generates, plots and saves function values ``func(y)``,
where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
the ``res`` attribute and the class instance is saved in a file
with the (weird) name ``str(func)``.
Parameters
----------
`repetitions`
number of evaluations per point; values larger than 1 are only
useful for noisy functions. For ``repetitions==0`` only already
generated data are plotted.
`locations`
coordinate-wise deviations from the middle point given in `__init__`
"""
if not repetitions:
self.plot()
return
res = self.res
for i in xrange(len(self.basis)): # i-th coordinate
if i not in res:
res[i] = {}
# xx = np.array(self.x)
# TODO: store res[i]['dx'] = self.basis[i] here?
for dx in locations:
xx = self.x + dx * self.basis[i]
xkey = dx # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
if xkey not in res[i]:
res[i][xkey] = []
n = repetitions
while n > 0:
n -= 1
res[i][xkey].append(self.func(xx, *self.args))
if plot:
self.plot()
self.save()
return self
def plot(self, plot_cmd=None, tf=lambda y: y):
"""plot the data we have, return ``self``"""
if not plot_cmd:
plot_cmd = self.plot_cmd
colors = 'bgrcmyk'
pyplot.hold(False)
res = self.res
flatx, flatf = self.flattened()
minf = np.inf
for i in flatf:
minf = min((minf, min(flatf[i])))
addf = 1e-9 - minf if minf <= 1e-9 else 0
for i in sorted(res.keys()): # we plot not all values here
if isinstance(i, int):
color = colors[i % len(colors)]
arx = sorted(res[i].keys())
plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
pyplot.hold(True)
plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
pyplot.ylabel('f + ' + str(addf))
pyplot.draw()
pyplot.ion()
pyplot.show()
# raw_input('press return')
return self
def flattened(self):
"""return flattened data ``(x, f)`` such that for the sweep through
coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
"""
flatx = {}
flatf = {}
for i in self.res:
if isinstance(i, int):
flatx[i] = []
flatf[i] = []
for x in sorted(self.res[i]):
for d in sorted(self.res[i][x]):
flatx[i].append(x)
flatf[i].append(d)
return flatx, flatf
def save(self, name=None):
"""save to file"""
import pickle
name = name if name else self.name
fun = self.func
del self.func # instance method produces error
pickle.dump(self, open(name + '.pkl', "wb"))
self.func = fun
return self
def load(self, name=None):
"""load from file"""
import pickle
name = name if name else self.name
s = pickle.load(open(name + '.pkl', 'rb'))
self.res = s.res # disregard the class
return self
#____________________________________________________________
#____________________________________________________________
class _Error(Exception):
"""generic exception of cma module"""
pass
# ____________________________________________________________
# ____________________________________________________________
#
class ElapsedTime(object):
"""using ``time.clock`` with overflow handling to measure CPU time.
Example:
>>> clock = ElapsedTime() # clock starts here
>>> t1 = clock() # get elapsed CPU time
Details: the underlying 32-bit C counter overflows after int(2**32/1e6) == 4294s, about 72 min
"""
def __init__(self):
self.tic0 = time.clock()
self.tic = self.tic0
self.lasttoc = time.clock()
self.lastdiff = time.clock() - self.lasttoc
self.time_to_add = 0
self.messages = 0
reset = __init__
def __call__(self):
toc = time.clock()
if toc - self.tic >= self.lasttoc - self.tic:
self.lastdiff = toc - self.lasttoc
self.lasttoc = toc
else: # overflow, reset self.tic
if self.messages < 3:
self.messages += 1
print(' in cma.ElapsedTime: time measure overflow, last difference estimated from',
self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff)
self.time_to_add += self.lastdiff + self.lasttoc - self.tic
self.tic = toc # reset
self.lasttoc = toc
self.elapsedtime = toc - self.tic + self.time_to_add
return self.elapsedtime
class Misc(object):
# ____________________________________________________________
# ____________________________________________________________
#
class MathHelperFunctions(object):
"""static convenience math helper functions; if the function name
is preceded with an "a", a numpy array is returned
"""
@staticmethod
def aclamp(x, upper):
return -Misc.MathHelperFunctions.apos(-x, -upper)
@staticmethod
def equals_approximately(a, b, eps=1e-12):
if a < 0:
a, b = -1 * a, -1 * b
return (a - eps < b < a + eps) or ((1 - eps) * a < b < (1 + eps) * a)
@staticmethod
def vequals_approximately(a, b, eps=1e-12):
a, b = array(a), array(b)
idx = np.where(a < 0)[0]
if len(idx):
a[idx], b[idx] = -1 * a[idx], -1 * b[idx]
return (np.all(a - eps < b) and np.all(b < a + eps)
) or (np.all((1 - eps) * a < b) and np.all(b < (1 + eps) * a))
@staticmethod
def expms(A, eig=np.linalg.eigh):
"""matrix exponential for a symmetric matrix"""
# TODO: check that this works reliably for low rank matrices
# first: symmetrize A
D, B = eig(A)
return np.dot(B, (np.exp(D) * B).T)
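# A quick sanity-check sketch (using ``numpy as np`` as imported above): for
# A = np.array([[0., 0.], [0., 1.]]) the result of ``expms(A)`` equals
# np.diag([1., np.e]) up to numerical error.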
@staticmethod
def amax(vec, vec_or_scalar):
return array(Misc.MathHelperFunctions.max(vec, vec_or_scalar))
@staticmethod
def max(vec, vec_or_scalar):
b = vec_or_scalar
if isscalar(b):
m = [max(x, b) for x in vec]
else:
m = [max(vec[i], b[i]) for i in rglen((vec))]
return m
@staticmethod
def minmax(val, min_val, max_val):
assert min_val <= max_val
return min((max_val, max((val, min_val))))
@staticmethod
def aminmax(val, min_val, max_val):
return array([min((max_val, max((v, min_val)))) for v in val])
@staticmethod
def amin(vec_or_scalar, vec_or_scalar2):
return array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2))
@staticmethod
def min(a, b):
iss = isscalar
if iss(a) and iss(b):
return min(a, b)
if iss(a):
a, b = b, a
# now only b can be still a scalar
if iss(b):
return [min(x, b) for x in a]
else: # two non-scalars must have the same length
return [min(a[i], b[i]) for i in rglen((a))]
@staticmethod
def norm(vec, expo=2):
return sum(vec**expo)**(1 / expo)
@staticmethod
def apos(x, lower=0):
"""clips argument (scalar or array) from below at lower"""
if lower == 0:
return (x > 0) * x
else:
return lower + (x > lower) * (x - lower)
@staticmethod
def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
"""``prctile(data, 50)`` returns the median, but p_vals can
also be a sequence.
For small samples this provides better values than matplotlib.mlab.prctile,
but it is also slower.
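Example (a small sketch; ``Mh`` is the module-level alias for this
class defined further below)::
Mh.prctile([1, 2, 3, 4], 50)        # == 2.5, the median
Mh.prctile([1, 2, 3, 4], [25, 75])  # == [1.5, 3.5]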
"""
ps = [p_vals] if isscalar(p_vals) else p_vals
if not sorted_:
data = sorted(data)
n = len(data)
d = []
for p in ps:
fi = p * n / 100 - 0.5
if fi <= 0: # maybe extrapolate?
d.append(data[0])
elif fi >= n - 1:
d.append(data[-1])
else:
i = int(fi)
d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
return d[0] if isscalar(p_vals) else d
@staticmethod
def sround(nb): # TODO: to be vectorized
"""return stochastic round: floor(nb) + (rand()<remainder(nb))"""
return nb // 1 + (np.random.rand(1)[0] < (nb % 1))
@staticmethod
def cauchy_with_variance_one():
n = np.random.randn() / np.random.randn()
while abs(n) > 1000:
n = np.random.randn() / np.random.randn()
return n / 25
@staticmethod
def standard_finite_cauchy(size=1):
try:
l = len(size)
except TypeError:
l = 0
if l == 0:
return array([Mh.cauchy_with_variance_one() for _i in xrange(size)])
elif l == 1:
return array([Mh.cauchy_with_variance_one() for _i in xrange(size[0])])
elif l == 2:
return array([[Mh.cauchy_with_variance_one() for _i in xrange(size[1])]
for _j in xrange(size[0])])
else:
raise _Error('len(size) cannot be larger than two')
@staticmethod
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
"""return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
# testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
# for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
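# a small sanity-check sketch (assumes ``numpy as np``): in 1-D with the default
# arguments, likelihood(np.array([0.])) == 1 / np.sqrt(2 * np.pi) ~= 0.3989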
if m is None:
dx = x
else:
dx = x - m # array(x) - array(m)
n = len(x)
s2pi = (2 * np.pi)**(n / 2.)
if Cinv is None:
return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
if detC is None:
detC = 1. / np.linalg.linalg.det(Cinv)
return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n
@staticmethod
def loglikelihood(self, x, previous=False):
"""return log-likelihood of `x` regarding the current sample distribution"""
# testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
# for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
# TODO: test this!!
# c=cma.fmin...
# c[3]['cma'].loglikelihood(...)
if previous and hasattr(self, 'lastiter'):
sigma = self.lastiter.sigma
Crootinv = self.lastiter._Crootinv
xmean = self.lastiter.mean
D = self.lastiter.D
elif previous and self.countiter > 1:
raise _Error('no previous distribution parameters stored, check options importance_mixing')
else:
sigma = self.sigma
Crootinv = self._Crootinv
xmean = self.mean
D = self.D
dx = array(x) - xmean # array(x) - array(m)
n = self.N
logs2pi = n * log(2 * np.pi) / 2.
logdetC = 2 * sum(log(D))
dx = np.dot(Crootinv, dx)
res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma)
if 1 < 3: # testing
s2pi = (2 * np.pi)**(n / 2.)
detC = np.prod(D)**2
res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)
assert res2 < res + 1e-8 or res2 > res - 1e-8
return res
# ____________________________________________________________
# ____________________________________________________________
#
# C and B are arrays rather than matrices, because they are
# addressed via B[i][j], matrices can only be addressed via B[i,j]
# tred2(N, B, diagD, offdiag);
# tql2(N, diagD, offdiag, B);
# Symmetric Householder reduction to tridiagonal form, translated from JAMA package.
@staticmethod
def eig(C):
"""eigendecomposition of a symmetric matrix, much slower than
`numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues
and an orthonormal basis of the corresponding eigenvectors, where
``Basis[i]``
the i-th row of ``Basis``
columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]``
the i-th eigenvector with eigenvalue ``EVals[i]``
"""
# class eig(object):
# def __call__(self, C):
# Householder transformation of a symmetric matrix V into tridiagonal form.
# -> n : dimension
# -> V : symmetric nxn-matrix
# <- V : orthogonal transformation matrix:
# tridiag matrix == V * V_in * V^t
# <- d : diagonal
# <- e[0..n-1] : off diagonal (elements 1..n-1)
# Symmetric tridiagonal QL algorithm, iterative
# Computes the eigensystem from a tridiagonal matrix in roughly 3N^3 operations
# -> n : Dimension.
# -> d : Diagonale of tridiagonal matrix.
# -> e[1..n-1] : off-diagonal, output from Householder
# -> V : matrix output von Householder
# <- d : eigenvalues
# <- e : garbage?
# <- V : basis of eigenvectors, according to d
# tred2(N, B, diagD, offdiag); B=C on input
# tql2(N, diagD, offdiag, B);
# private void tred2 (int n, double V[][], double d[], double e[]) {
def tred2 (n, V, d, e):
# This is derived from the Algol procedures tred2 by
# Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
# Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
# Fortran subroutine in EISPACK.
num_opt = False # factor 1.5 in 30-D
for j in xrange(n):
d[j] = V[n - 1][j] # d is output argument
# Householder reduction to tridiagonal form.
for i in xrange(n - 1, 0, -1):
# Scale to avoid under/overflow.
h = 0.0
if not num_opt:
scale = 0.0
for k in xrange(i):
scale = scale + abs(d[k])
else:
scale = sum(abs(d[0:i]))
if scale == 0.0:
e[i] = d[i - 1]
for j in xrange(i):
d[j] = V[i - 1][j]
V[i][j] = 0.0
V[j][i] = 0.0
else:
# Generate Householder vector.
if not num_opt:
for k in xrange(i):
d[k] /= scale
h += d[k] * d[k]
else:
d[:i] /= scale
h = np.dot(d[:i], d[:i])
f = d[i - 1]
g = h**0.5
if f > 0:
g = -g
e[i] = scale * g
h = h - f * g
d[i - 1] = f - g
if not num_opt:
for j in xrange(i):
e[j] = 0.0
else:
e[:i] = 0.0
# Apply similarity transformation to remaining columns.
for j in xrange(i):
f = d[j]
V[j][i] = f
g = e[j] + V[j][j] * f
if not num_opt:
for k in xrange(j + 1, i):
g += V[k][j] * d[k]
e[k] += V[k][j] * f
e[j] = g
else:
e[j + 1:i] += V.T[j][j + 1:i] * f
e[j] = g + np.dot(V.T[j][j + 1:i], d[j + 1:i])
f = 0.0
if not num_opt:
for j in xrange(i):
e[j] /= h
f += e[j] * d[j]
else:
e[:i] /= h
f += np.dot(e[:i], d[:i])
hh = f / (h + h)
if not num_opt:
for j in xrange(i):
e[j] -= hh * d[j]
else:
e[:i] -= hh * d[:i]
for j in xrange(i):
f = d[j]
g = e[j]
if not num_opt:
for k in xrange(j, i):
V[k][j] -= (f * e[k] + g * d[k])
else:
V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
d[j] = V[i - 1][j]
V[i][j] = 0.0
d[i] = h
# end for i--
# Accumulate transformations.
for i in xrange(n - 1):
V[n - 1][i] = V[i][i]
V[i][i] = 1.0
h = d[i + 1]
if h != 0.0:
if not num_opt:
for k in xrange(i + 1):
d[k] = V[k][i + 1] / h
else:
d[:i + 1] = V.T[i + 1][:i + 1] / h
for j in xrange(i + 1):
if not num_opt:
g = 0.0
for k in xrange(i + 1):
g += V[k][i + 1] * V[k][j]
for k in xrange(i + 1):
V[k][j] -= g * d[k]
else:
g = np.dot(V.T[i + 1][0:i + 1], V.T[j][0:i + 1])
V.T[j][:i + 1] -= g * d[:i + 1]
if not num_opt:
for k in xrange(i + 1):
V[k][i + 1] = 0.0
else:
V.T[i + 1][:i + 1] = 0.0
if not num_opt:
for j in xrange(n):
d[j] = V[n - 1][j]
V[n - 1][j] = 0.0
else:
d[:n] = V[n - 1][:n]
V[n - 1][:n] = 0.0
V[n - 1][n - 1] = 1.0
e[0] = 0.0
# Symmetric tridiagonal QL algorithm, taken from JAMA package.
# private void tql2 (int n, double d[], double e[], double V[][]) {
# needs roughly 3N^3 operations
def tql2 (n, d, e, V):
# This is derived from the Algol procedures tql2, by
# Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
# Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
# Fortran subroutine in EISPACK.
num_opt = False # using vectors from numpy makes it faster
if not num_opt:
for i in xrange(1, n): # (int i = 1; i < n; i++):
e[i - 1] = e[i]
else:
e[0:n - 1] = e[1:n]
e[n - 1] = 0.0
f = 0.0
tst1 = 0.0
eps = 2.0**-52.0
for l in xrange(n): # (int l = 0; l < n; l++) {
# Find small subdiagonal element
tst1 = max(tst1, abs(d[l]) + abs(e[l]))
m = l
while m < n:
if abs(e[m]) <= eps * tst1:
break
m += 1
# If m == l, d[l] is an eigenvalue,
# otherwise, iterate.
if m > l:
iiter = 0
while 1: # do {
iiter += 1 # (Could check iteration count here.)
# Compute implicit shift
g = d[l]
p = (d[l + 1] - g) / (2.0 * e[l])
r = (p**2 + 1)**0.5 # hypot(p,1.0)
if p < 0:
r = -r
d[l] = e[l] / (p + r)
d[l + 1] = e[l] * (p + r)
dl1 = d[l + 1]
h = g - d[l]
if not num_opt:
for i in xrange(l + 2, n):
d[i] -= h
else:
d[l + 2:n] -= h
f = f + h
# Implicit QL transformation.
p = d[m]
c = 1.0
c2 = c
c3 = c
el1 = e[l + 1]
s = 0.0
s2 = 0.0
# hh = V.T[0].copy() # only with num_opt
for i in xrange(m - 1, l - 1, -1): # (int i = m-1; i >= l; i--) {
c3 = c2
c2 = c
s2 = s
g = c * e[i]
h = c * p
r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])
e[i + 1] = s * r
s = e[i] / r
c = p / r
p = c * d[i] - s * g
d[i + 1] = h + s * (c * g + s * d[i])
# Accumulate transformation.
if not num_opt: # overall factor 3 in 30-D
for k in xrange(n): # (int k = 0; k < n; k++) {
h = V[k][i + 1]
V[k][i + 1] = s * V[k][i] + c * h
V[k][i] = c * V[k][i] - s * h
else: # about 20% faster in 10-D
hh = V.T[i + 1].copy()
# hh[:] = V.T[i+1][:]
V.T[i + 1] = s * V.T[i] + c * hh
V.T[i] = c * V.T[i] - s * hh
# V.T[i] *= c
# V.T[i] -= s * hh
p = -s * s2 * c3 * el1 * e[l] / dl1
e[l] = s * p
d[l] = c * p
# Check for convergence.
if abs(e[l]) <= eps * tst1:
break
# } while (Math.abs(e[l]) > eps*tst1);
d[l] = d[l] + f
e[l] = 0.0
# Sort eigenvalues and corresponding vectors.
# tql2
N = len(C[0])
if 1 < 3:
V = [[x[i] for i in xrange(N)] for x in C] # copy each "row"
d = N * [0.]
e = N * [0.]
tred2(N, V, d, e)
tql2(N, d, e, V)
return (array(d), array(V))
Mh = Misc.MathHelperFunctions
# if _experimental:
# from new_stuff import *
def pprint(to_be_printed):
"""nicely formatted print"""
try:
import pprint as pp
# generate an instance PrettyPrinter
# pp.PrettyPrinter().pprint(to_be_printed)
pp.pprint(to_be_printed)
except ImportError:
if isinstance(to_be_printed, dict):
print('{')
for k, v in to_be_printed.items():
print("'" + k + "'" if isinstance(k, basestring) else k,
': ',
"'" + v + "'" if isinstance(k, basestring) else v,
sep="")
print('}')
else:
print('could not import pprint module, applying regular print')
print(to_be_printed)
pp = pprint
class ConstRandnShift(object):
"""``ConstRandnShift()(x)`` adds a fixed realization of
``stddev * randn(len(x))`` to the vector x.
By default, the realized shift is the same for each instance of
``ConstRandnShift``, see ``seed`` argument. This class is used in
class ``FFWrapper.ShiftedFitness`` as default transformation.
See: class ``FFWrapper.ShiftedFitness``
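A minimal usage sketch (compare also the doctest in method ``get``)::
import numpy as np, cma
shift = cma.ConstRandnShift(stddev=3, seed=1)
y = shift(np.zeros(5))  # subtracts a fixed realization of 3 * randn(5)
all(shift(np.zeros(5)) == y)  # True, the shift is the same on every call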
"""
def __init__(self, stddev=3, seed=1):
"""with ``seed=None`` each instance realizes a different shift"""
self.seed = seed
self.stddev = stddev
self._xopt = {}
def __call__(self, x):
"""return "shifted" ``x - shift``
"""
try:
x_opt = self._xopt[len(x)]
except KeyError:
if self.seed is None:
shift = np.random.randn(len(x))
else:
rstate = np.random.get_state()
np.random.seed(self.seed)
shift = np.random.randn(len(x))
np.random.set_state(rstate)
x_opt = self._xopt.setdefault(len(x), self.stddev * shift)
return array(x, copy=False) - x_opt
def get(self, dimension):
"""return shift applied to ``zeros(dimension)``
>>> import numpy as np, cma
>>> s = cma.ConstRandnShift()
>>> assert all(s(-s.get(3)) == np.zeros(3))
>>> assert all(s.get(3) == s(np.zeros(3)))
"""
return self.__call__(np.zeros(dimension))
class Rotation(object):
"""Rotation class that implements an orthogonal linear transformation,
one for each dimension.
By default each ``Rotation`` instance provides a different "random"
but fixed rotation. This class is used to implement non-separable
test functions, most conveniently via `FFWrapper.RotatedFitness`.
Example:
>>> import cma, numpy as np
>>> R = cma.Rotation()
>>> R2 = cma.Rotation() # another rotation
>>> x = np.array((1,2,3))
>>> print(R(R(x), inverse=1))
[ 1. 2. 3.]
See: `FFWrapper.RotatedFitness`
"""
dicMatrices = {} # store matrix if necessary, for each dimension
def __init__(self, seed=None):
"""by default a random but fixed rotation, different for each instance"""
self.seed = seed
self.dicMatrices = {} # otherwise there might be shared bases which is probably not what we want
def __call__(self, x, inverse=False): # function when calling an object
"""Rotates the input array `x` with a fixed rotation matrix
(``self.dicMatrices['str(len(x))']``)
"""
x = np.array(x, copy=False)
N = x.shape[0] # can be an array or matrix, TODO: accept also a list of arrays?
if str(N) not in self.dicMatrices: # create new N-basis for once and all
rstate = np.random.get_state()
if self.seed is not None:
np.random.seed(self.seed)
B = np.random.randn(N, N)
for i in xrange(N):
for j in xrange(0, i):
B[i] -= np.dot(B[i], B[j]) * B[j]
B[i] /= sum(B[i]**2)**0.5
self.dicMatrices[str(N)] = B
np.random.set_state(rstate)
if inverse:
return np.dot(self.dicMatrices[str(N)].T, x) # compute rotation
else:
return np.dot(self.dicMatrices[str(N)], x) # compute rotation
# Use rotate(x) to rotate x
rotate = Rotation()
# ____________________________________________________________
# ____________________________________________________________
#
class FFWrapper(object):
"""
A collection of (yet experimental) classes to implement fitness
transformations and wrappers. Aliased to `FF2` below.
"""
class FitnessTransformation(object):
"""This class does nothing but serve as an interface template.
Typical use-case::
f = FitnessTransformation(f, parameters_if_needed)
See: class ``TransformSearchSpace``
"""
def __init__(self, fitness_function, *args, **kwargs):
"""`fitness_function` must be callable (e.g. a function
or a callable class instance)"""
# the original fitness to be called
self.inner_fitness = fitness_function
# self.condition_number = ...
def __call__(self, x, *args):
"""identity as default transformation"""
if hasattr(self, 'x_transformation'):
x = self.x_transformation(x)
f = self.inner_fitness(x, *args)
if hasattr(self, 'f_transformation'):
f = self.f_transformation(f)
return f
class BookKeeping(FitnessTransformation):
"""a stump for experimenting with use-cases and possible
extensions of book keeping
use-case:
f = BookKeeping(f)
print(f.count_evaluations)
"""
def __init__(self, callable=None):
self.count_evaluations = 0
self.inner_fitness = callable
def __call__(self, *args):
# assert len(args[0]) # x-vector
self.count_evaluations += 1
return self.inner_fitness(*args)
class TransformSearchSpace(FitnessTransformation):
"""::
f = TransformSearchSpace(f, ConstRandnShift())
constructs the composed function f <- f o shift.
Details: to some extent this is a nice shortcut for::
f = lambda x, *args: f_in(ConstRandnShift()(x), *args)
however the `lambda` definition depends on the value of
``f_in`` even after ``f`` has been assigned.
See: `ShiftedFitness`, `RotatedFitness`
"""
def __init__(self, fitness_function, transformation):
"""``TransformSearchSpace(f, s)(x) == f(s(x))``
>>> import cma
>>> f0 = lambda x: sum(x)
>>> shift_fct = cma.ConstRandnShift()
>>> f = cma.FF2.TransformSearchSpace(f0, shift_fct)
>>> x = [1, 2, 3]
>>> assert f(x) == f0(shift_fct(x))
"""
self.inner_fitness = fitness_function
# akin to FitnessTransformation.__init__(self, fitness_function)
# akin to super(TransformSearchSpace, self).__init__(fitness_function)
self.x_transformation = transformation
# will be used in base class
class ScaleCoordinates(TransformSearchSpace):
"""define a scaling of each variable
"""
def __init__(self, fitness_function, multipliers=None):
"""
:param fitness_function: a callable object
:param multipliers: recycling is not implemented, i.e. the length
must match the dimension of the argument passed to `fitness_function`
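A usage sketch (the multiplier values are illustrative)::
import cma
f = cma.FF2.ScaleCoordinates(cma.fcts.sphere, [1, 10])
f([1, 1])  # == 101, the sphere value of [1, 10]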
"""
super(FFWrapper.ScaleCoordinates, self).__init__(
fitness_function, self.transformation)
# TransformSearchSpace.__init__(self, fitness_function,
# self.transformation)
self.multiplier = multipliers
if self.multiplier is not None and hasattr(self.multiplier, '__len__'):
self.multiplier = array(self.multiplier, copy=True)
def transformation(self, x, *args):
if self.multiplier is None:
return array(x, copy=False)
return self.multiplier * array(x, copy=False)
class ShiftedFitness(TransformSearchSpace):
"""``f = cma.ShiftedFitness(cma.fcts.sphere)`` constructs a
shifted sphere function, by default the shift is computed
from class ``ConstRandnShift`` with std dev 3.
"""
def __init__(self, f, shift=None):
"""``shift(x)`` must return a (stable) shift of x.
Details: this class solely provides as default second
argument to TransformSearchSpace a shift in search space.
``shift=lambda x: x`` would provide "no shift", ``None``
expands to ``cma.ConstRandnShift()``.
"""
self.inner_fitness = f
self.x_transformation = shift if shift else ConstRandnShift()
# alternatively we could have called super
class RotatedFitness(TransformSearchSpace):
"""``f = cma.RotatedFitness(cma.fcts.elli)`` constructs a
rotated ellipsoid function
"""
def __init__(self, f, rotate=rotate):
"""``rotate(x)`` must return a (stable) rotation of x.
Details: this class solely provides a default second
argument to TransformSearchSpace, namely a search space
rotation.
"""
super(FFWrapper.RotatedFitness, self).__init__(f, rotate)
# self.x_transformation = rotate
class FixVariables(TransformSearchSpace):
"""fix variables to given values, thereby reducing the
dimensionality of the preimage.
The constructor takes ``index_value_pairs`` as dict or list of
pairs as input and returns a function with smaller preimage space
than `f`.
Details: this might replace the fixed_variables option in
CMAOptions in future, but hasn't been tested yet.
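A usage sketch (the index/value pair is illustrative)::
import cma
f_full = cma.fcts.sphere                        # objective on the full space
f_red = cma.FF2.FixVariables(f_full, {0: 2.0})  # fix x[0] to 2.0
f_red([1, 1])  # == 6.0, the sphere value of [2.0, 1, 1]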
"""
def __init__(self, f, index_value_pairs):
"""``f`` is defined on the full-dimensional space; the returned callable
takes the reduced vector and inserts the values from ``index_value_pairs``
at their indices before calling ``f``"""
super(FFWrapper.FixVariables, self).__init__(f, self.insert_variables)
# same as TransformSearchSpace.__init__(f, self.insert_variables)
self.index_value_pairs = dict(index_value_pairs)
def insert_variables(self, x):
y = np.zeros(len(x) + len(self.index_value_pairs))
assert len(y) > max(self.index_value_pairs)
j = 0
for i in xrange(len(y)):
if i in self.index_value_pairs:
y[i] = self.index_value_pairs[i]
else:
y[i] = x[j]
j += 1
return y
class SomeNaNFitness(FitnessTransformation):
def __init__(self, fitness_function, probability_of_nan=0.1):
self.p = probability_of_nan
self.inner_fitness = fitness_function
def __call__(self, x, *args):
if np.random.rand(1) <= self.p:
return np.NaN
else:
return self.inner_fitness(x, *args)
class NoisyFitness(FitnessTransformation):
"""apply noise via f += rel_noise(dim) * f + abs_noise()"""
def __init__(self, fitness_function,
rel_noise=lambda dim: 1.1 * np.random.randn() / dim,
abs_noise=lambda: 1.1 * np.random.randn()):
self.rel_noise = rel_noise
self.abs_noise = abs_noise
self.inner_fitness = fitness_function
def __call__(self, x, *args):
f = self.inner_fitness(x, *args)
if self.rel_noise:
f += f * self.rel_noise(len(x))
assert isscalar(f)
if self.abs_noise:
f += self.abs_noise()
return f
class GlueArguments(FitnessTransformation):
"""``f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e4)``
>>> import cma
>>> f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e1)
>>> f([1, 2]) # == 1**2 + 1e1 * 2**2
41.0
"""
def __init__(self, fitness_function, *args, **kwargs):
self.inner_fitness = fitness_function
self.args = args
self.kwargs = kwargs
def __call__(self, x, *args):
return self.inner_fitness(array(x, copy=False),
*(args + self.args), **self.kwargs)
class UnknownFF(object):
"""search in [-10, 10] for the unknown (optimum)"""
def __init__(self, seed=2):
self.seed = seed
self._x_opt_ = {}
self.rotate = Rotation(seed)
self.count_evaluations = 0
def _x_opt(self, dim):
rstate = np.random.get_state()
if self.seed is not None:
np.random.seed(self.seed)
x = self._x_opt_.setdefault(dim,
0 * 3 * np.random.randn(dim))
np.random.set_state(rstate)
return x
def typical_x(self, dim):
off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) /
np.logspace(0, 1, dim), inverse=True)
off[np.s_[3:]] += 0.005
off[-1] *= 1e2
off[0] /= 2.0e3 if off[0] > 0 else 1e3
off[2] /= 3.01e4 if off[2] < 0 else 2e4
return self._x_opt(dim) + off
def __call__(self, x):
self.count_evaluations += 1
N = len(x)
x = x - self._x_opt(N)
x[-1] /= 1e2
x[0] *= 2.0e3 if x[0] > 0 else 1e3
x[2] *= 3.01e4 if x[2] < 0 else 2e4
x = np.logspace(0, 1, N) * self.rotate(x)
return 10 * N - np.e**2 + \
sum(x**2 - 10 * np.cos(2 * np.pi * x))
FF2 = FFWrapper
class FitnessFunctions(object):
""" versatile container for test objective functions """
def __init__(self):
self.counter = 0 # number of calls or any other practical use
def rot(self, x, fun, rot=1, args=()):
"""returns ``fun(rotation(x), *args)``, i.e. `fun` applied to a rotated argument"""
if len(np.shape(array(x))) > 1: # parallelized
res = []
for x in x:
res.append(self.rot(x, fun, rot, args))
return res
if rot:
return fun(rotate(x, *args))
else:
return fun(x)
def somenan(self, x, fun, p=0.1):
"""returns sometimes np.NaN, otherwise fun(x)"""
if np.random.rand(1) < p:
return np.NaN
else:
return fun(x)
def rand(self, x):
"""Random test objective function"""
return np.random.random(1)[0]
def linear(self, x):
return -x[0]
def lineard(self, x):
if 1 < 3 and any(array(x) < 0):
return np.nan
if 1 < 3 and sum([ (10 + i) * x[i] for i in rglen(x)]) > 50e3:
return np.nan
return -sum(x)
def sphere(self, x):
"""Sphere (squared norm) test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
return sum((x + 0)**2)
def grad_sphere(self, x, *args):
return 2*array(x, copy=False)
def grad_to_one(self, x, *args):
return array(x, copy=False) - 1
def sphere_pos(self, x):
"""Sphere (squared norm) test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
c = 0.0
if x[0] < c:
return np.nan
return -c**2 + sum((x + 0)**2)
def spherewithoneconstraint(self, x):
return sum((x + 0)**2) if x[0] > 1 else np.nan
def elliwithoneconstraint(self, x, idx=[-1]):
return self.ellirot(x) if all(array(x)[idx] > 1) else np.nan
def spherewithnconstraints(self, x):
return sum((x + 0)**2) if all(array(x) > 1) else np.nan
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
def noisysphere(self, x, noise=2.10e-9, cond=1.0, noise_offset=0.10):
"""noise=10 does not work with default popsize, noise handling does not help """
return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x)) + noise_offset * np.random.rand()
def spherew(self, x):
"""Sphere (squared norm) with sum x_i = 1 test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
# s = sum(abs(x))
# return sum((x/s+0)**2) - 1/len(x)
# return sum((x/s)**2) - 1/len(x)
return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2)
def partsphere(self, x):
"""Sphere (squared norm) test objective function"""
self.counter += 1
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
dim = len(x)
x = array([x[i % dim] for i in xrange(2 * dim)])
N = 8
i = self.counter % dim
# f = sum(x[i:i + N]**2)
f = sum(x[np.random.randint(dim, size=N)]**2)
return f
def sectorsphere(self, x):
"""asymmetric Sphere (squared norm) test objective function"""
return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2)
def cornersphere(self, x):
"""Sphere (squared norm) test objective function constraint to the corner"""
nconstr = len(x) - 0
if any(x[:nconstr] < 1):
return np.NaN
return sum(x**2) - nconstr
def cornerelli(self, x):
"""Ellipsoid test objective function constrained to the corner ``all(x >= 1)``"""
if any(x < 1):
return np.NaN
return self.elli(x) - self.elli(np.ones(len(x)))
def cornerellirot(self, x):
"""Rotated ellipsoid test objective function constrained to the corner ``all(x >= 1)``"""
if any(x < 1):
return np.NaN
return self.ellirot(x)
def normalSkew(self, f):
N = np.random.randn(1)[0]**2
if N < 1:
N = f * N # diminish blow up lower part
return N
def noiseC(self, x, func=sphere, fac=10, expon=0.8):
f = func(self, x)
N = np.random.randn(1)[0] / np.random.randn(1)[0]
return max(1e-19, f + (float(fac) / len(x)) * f**expon * N)
def noise(self, x, func=sphere, fac=10, expon=1):
f = func(self, x)
# R = np.random.randn(1)[0]
R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0]
# sig = float(fac)/float(len(x))
# R = log(f) + 0.5*log(f) * random.randn(1)[0]
# return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R))
# return max(1e-19, f * np.exp(sig * N / f**expon))
# return max(1e-19, f * normalSkew(f**expon)**sig)
return f + 10**R # == f + f**(1+0.5*RN)
def cigar(self, x, rot=0, cond=1e6, noise=0):
"""Cigar test objective function"""
if rot:
x = rotate(x)
x = [x] if isscalar(x[0]) else x # scalar into list
f = [(x[0]**2 + cond * sum(x[1:]**2)) * np.exp(noise * np.random.randn(1)[0] / len(x)) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_cigar(self, x, *args):
grad = 2 * 1e6 * np.array(x)
grad[0] /= 1e6
return grad
def diagonal_cigar(self, x, cond=1e6):
axis = np.ones(len(x)) / len(x)**0.5
proj = dot(axis, x) * axis
s = sum(proj**2)
s += cond * sum((x - proj)**2)
return s
def tablet(self, x, rot=0):
"""Tablet test objective function"""
if rot and rot is not fcts.tablet:
x = rotate(x)
x = [x] if isscalar(x[0]) else x # scalar into list
f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_tablet(self, x, *args):
grad = 2 * np.array(x)
grad[0] *= 1e6
return grad
def cigtab(self, y):
"""Cigtab test objective function"""
X = [y] if isscalar(y[0]) else y
f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
return f if len(f) > 1 else f[0]
def twoaxes(self, y):
"""Cigtab test objective function"""
X = [y] if isscalar(y[0]) else y
N2 = len(X[0]) // 2
f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
return f if len(f) > 1 else f[0]
def ellirot(self, x):
return fcts.elli(array(x), 1)
def hyperelli(self, x):
N = len(x)
return sum((np.arange(1, N + 1) * x)**2)
def halfelli(self, x):
l = len(x) // 2
felli = self.elli(x[:l])
return felli + 1e-8 * sum(x[l:]**2)
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
"""Ellipsoid test objective function"""
if not isscalar(x[0]): # parallel evaluation
return [self.elli(xi, rot) for xi in x] # could save 20% overall
if rot:
x = rotate(x)
N = len(x)
if actuator_noise:
x = x + actuator_noise * np.random.randn(N)
ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)
alpha = 0.49 + 1. / N
beta = 1
felli = np.random.rand(1)[0]**beta * ftrue * \
max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
# felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
# np.abs(np.random.randn(1)[0]))**0
if both:
return (felli, ftrue)
else:
# return felli # possibly noisy value
return ftrue # + np.random.randn()
def grad_elli(self, x, *args):
cond = 1e6
N = len(x)
return 2 * cond**(np.arange(N) / (N - 1.)) * array(x, copy=False)
def fun_as_arg(self, x, *args):
"""``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``.
Use case::
fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical)
calls fun_as_args(x, args) and grad_numerical(x, fun, args=args)
"""
fun = args[0]
more_args = args[1:] if len(args) > 1 else ()
return fun(x, *more_args)
def grad_numerical(self, x, func, epsilon=None):
"""symmetric gradient"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
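    # Added note (illustrative, not from the original code): the central
    # difference above approximates d f / d x_i as
    #   (f(x + eps_i * e_i) - f(x - eps_i * e_i)) / (2 * eps_i),
    # so for the sphere function it should closely match the analytic gradient:
    #   >>> fcts.grad_numerical(np.ones(3), fcts.sphere)  # ~ array([2., 2., 2.])
    #   >>> fcts.grad_sphere(np.ones(3))                  # exactly array([2., 2., 2.])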
def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6):
"""ellipsoid test objective function with "constraints" """
N = len(x)
f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
cvals = (x[0] + 1,
x[0] + 1 + 100 * x[1],
x[0] + 1 - 100 * x[1])
if tough:
f += cfac * sum(max(0, c) for c in cvals)
else:
f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
return f
def rosen(self, x, alpha=1e2):
"""Rosenbrock test objective function"""
x = [x] if isscalar(x[0]) else x # scalar into list
f = [sum(alpha * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_rosen(self, x, *args):
N = len(x)
grad = np.zeros(N)
grad[0] = 2 * (x[0] - 1) + 200 * (x[1] - x[0]**2) * -2 * x[0]
i = np.arange(1, N - 1)
grad[i] = 2 * (x[i] - 1) - 400 * (x[i+1] - x[i]**2) * x[i] + 200 * (x[i] - x[i-1]**2)
grad[N-1] = 200 * (x[N-1] - x[N-2]**2)
return grad
def diffpow(self, x, rot=0):
"""Diffpow test objective function"""
N = len(x)
if rot:
x = rotate(x)
return sum(np.abs(x)**(2. + 4.*np.arange(N) / (N - 1.)))**0.5
def rosenelli(self, x):
N = len(x)
        return self.rosen(x[:N // 2]) + self.elli(x[N // 2:], cond=1)
def ridge(self, x, expo=2):
x = [x] if isscalar(x[0]) else x # scalar into list
f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def ridgecircle(self, x, expo=0.5):
"""happy cat by HG Beyer"""
a = len(x)
s = sum(x**2)
return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a
def happycat(self, x, alpha=1. / 8):
s = sum(x**2)
return ((s - len(x))**2)**alpha + (s / 2 + sum(x)) / len(x) + 0.5
def flat(self, x):
return 1
return 1 if np.random.rand(1) < 0.9 else 1.1
return np.random.randint(1, 30)
def branin(self, x):
# in [0,15]**2
y = x[1]
x = x[0] + 5
return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000
def goldsteinprice(self, x):
x1 = x[0]
x2 = x[1]
return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3
def griewank(self, x):
# was in [-600 600]
x = (600. / 5) * x
return 1 - np.prod(np.cos(x / sqrt(1. + np.arange(len(x))))) + sum(x**2) / 4e3
def rastrigin(self, x):
"""Rastrigin test objective function"""
if not isscalar(x[0]):
N = len(x[0])
return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
# return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1)
N = len(x)
return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))
def schaffer(self, x):
""" Schaffer function x0 in [-100..100]"""
N = len(x)
s = x[0:N - 1]**2 + x[1:N]**2
return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))
def schwefelelli(self, x):
s = 0
f = 0
for i in rglen(x):
s += x[i]
f += s**2
return f
def schwefelmult(self, x, pen_fac=1e4):
"""multimodal Schwefel function with domain -500..500"""
y = [x] if isscalar(x[0]) else x
N = len(y[0])
f = array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5))
+ pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
return f if len(f) > 1 else f[0]
def optprob(self, x):
n = np.arange(len(x)) + 1
f = n * x * (1 - x)**(n - 1)
return sum(1 - f)
def lincon(self, x, theta=0.01):
"""ridge like linear function with one linear constraint"""
if x[0] < 0:
return np.NaN
return theta * x[1] + x[0]
def rosen_nesterov(self, x, rho=100):
"""needs exponential number of steps in a non-increasing f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
"""
f = 0.25 * (x[0] - 1)**2
f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
return f
def powel_singular(self, x):
# ((8 * np.sin(7 * (x[i] - 0.9)**2)**2 ) + (6 * np.sin()))
res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 +
(x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4
for i in xrange(1, len(x) - 2))
return 1 + res
def styblinski_tang(self, x):
"""in [-5, 5]
"""
# x_opt = N * [-2.90353402], seems to have essentially
# (only) 2**N local optima
return (39.1661657037714171054273576010019 * len(x))**1 + \
sum(x**4 - 16*x**2 + 5*x) / 2
def trid(self, x):
return sum((x-1)**2) - sum(x[:-1] * x[1:])
def bukin(self, x):
"""Bukin function from Wikipedia, generalized simplistically from 2-D.
http://en.wikipedia.org/wiki/Test_functions_for_optimization"""
s = 0
for k in xrange((1+len(x)) // 2):
z = x[2 * k]
y = x[min((2*k + 1, len(x)-1))]
s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10)
return s
fcts = FitnessFunctions()
Fcts = fcts # for cross compatibility, as if the functions were static members of class Fcts
FF = fcts
def felli(x):
"""unbound test function, needed to test multiprocessor"""
return sum(1e6**(np.arange(len(x)) / (len(x) - 1)) * (np.array(x, copy=False))**2)
# ____________________________________________
# ____________________________________________________________
def _test(module=None): # None is fine when called from inside the module
import doctest
print(doctest.testmod(module)) # this is pretty coool!
def process_doctest_output(stream=None):
""" """
import fileinput
s1 = ""
s2 = ""
s3 = ""
state = 0
for line in fileinput.input(stream): # takes argv as file or stdin
if 1 < 3:
s3 += line
if state < -1 and line.startswith('***'):
print(s3)
if line.startswith('***'):
s3 = ""
if state == -1: # found a failed example line
s1 += '\n\n*** Failed Example:' + line
s2 += '\n\n\n' # line
# state = 0 # wait for 'Expected:' line
if line.startswith('Expected:'):
state = 1
continue
elif line.startswith('Got:'):
state = 2
continue
elif line.startswith('***'): # marks end of failed example
state = 0
elif line.startswith('Failed example:'):
state = -1
elif line.startswith('Exception raised'):
state = -2
# in effect more else:
if state == 1:
s1 += line + ''
if state == 2:
s2 += line + ''
# ____________________________________________________________
# ____________________________________________________________
#
def main(argv=None):
"""to install and/or test from the command line use::
python cma.py [options | func dim sig0 [optkey optval][optkey optval]...]
with options being
``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity.
``install`` to install cma.py (uses setup from distutils.core).
``--doc`` for more infos.
Or start Python or (even better) ``ipython`` and::
import cma
cma.main('--test')
help(cma)
help(cma.fmin)
res = fmin(cma.fcts.rosen, 10 * [0], 1)
cma.plot()
Examples
========
Testing with the local python distribution from a command line
in a folder where ``cma.py`` can be found::
python cma.py --test
And a single run on the Rosenbrock function::
python cma.py rosen 10 1 # dimension initial_sigma
python cma.py plot
In the python shell::
import cma
cma.main('--test')
"""
if argv is None:
argv = sys.argv # should have better been sys.argv[1:]
else:
if isinstance(argv, list):
argv = ['python'] + argv # see above
else:
argv = ['python'] + [argv]
# uncomment for unit test
# _test()
# handle input arguments, getopt might be helpful ;-)
if len(argv) >= 1: # function and help
if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'):
print(main.__doc__)
fun = None
elif argv[1].startswith('-t') or argv[1].startswith('--test'):
import doctest
if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')): # verbose
print('doctest for cma.py: due to different platforms and python versions')
print('and in some cases due to a missing unique random seed')
print('many examples will "fail". This is OK, if they give a similar')
print('to the expected result and if no exception occurs. ')
# if argv[1][2] == 'v':
doctest.testmod(sys.modules[__name__], report=True) # this is quite cool!
else: # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')):
print('doctest for cma.py: launching...') # not anymore: (it might be necessary to close the pop up window to finish)
fn = '_cma_doctest_.txt'
stdout = sys.stdout
try:
with open(fn, 'w') as f:
sys.stdout = f
clock = ElapsedTime()
doctest.testmod(sys.modules[__name__], report=True) # this is quite cool!
t_elapsed = clock()
finally:
sys.stdout = stdout
process_doctest_output(fn)
# clean up
try:
import os
for name in os.listdir('.'):
if (name.startswith('bound_method_FitnessFunctions.rosen_of_cma.FitnessFunctions_object_at_')
and name.endswith('.pkl')):
os.remove(name)
except:
pass
print('doctest for cma.py: finished (no other output should be seen after launching, more in file _cma_doctest_.txt)')
print(' elapsed time [s]:', t_elapsed)
return
elif argv[1] == '--doc':
print(__doc__)
print(CMAEvolutionStrategy.__doc__)
print(fmin.__doc__)
fun = None
elif argv[1] == '--fcts':
print('List of valid function names:')
print([d for d in dir(fcts) if not d.startswith('_')])
fun = None
elif argv[1] in ('install', '--install'):
from distutils.core import setup
setup(name="cma",
long_description=__doc__,
version=__version__.split()[0],
description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python",
author="Nikolaus Hansen",
author_email="hansen at lri.fr",
maintainer="Nikolaus Hansen",
maintainer_email="hansen at lri.fr",
url="https://www.lri.fr/~hansen/cmaes_inmatlab.html#python",
license="BSD",
classifiers = [
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"Intended Audience :: Other Audience",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
# "License :: OSI Approved :: MIT License",
],
keywords=["optimization", "CMA-ES", "cmaes"],
py_modules=["cma"],
requires=["numpy"],
)
fun = None
elif argv[1] in ('plot',):
plot(name=argv[2] if len(argv) > 2 else None)
raw_input('press return')
fun = None
elif len(argv) > 3:
fun = eval('fcts.' + argv[1])
else:
print('try -h option')
fun = None
if fun is not None:
if len(argv) > 2: # dimension
x0 = np.ones(eval(argv[2]))
if len(argv) > 3: # sigma
sig0 = eval(argv[3])
opts = {}
for i in xrange(5, len(argv), 2):
opts[argv[i - 1]] = eval(argv[i])
# run fmin
if fun is not None:
tic = time.time()
fmin(fun, x0, sig0, opts) # ftarget=1e-9, tolfacupx=1e9, verb_log=10)
# plot()
# print ' best function value ', res[2]['es'].best[1]
        print('elapsed time [s]: %.2f' % round(time.time() - tic, 2))
elif not len(argv):
fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {'ftarget':1e-9})
# ____________________________________________________________
# ____________________________________________________________
#
# mainly for testing purpose
# executed when called from an OS shell
if __name__ == "__main__":
# for i in xrange(1000): # how to find the memory leak
# main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"])
main()
| gpl-3.0 |
gclenaghan/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
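    # Added commentary (not part of the original test file): fit() below is a
    # reference implementation of the passive-aggressive updates -- for the
    # "hinge"/"epsilon_insensitive" losses the step size is min(C, loss / ||x_i||^2)
    # (the PA-I rule), for the squared losses it is loss / (||x_i||^2 + 1/(2*C))
    # (the PA-II rule); the update direction is y_i for the classification losses
    # and sign(y_i - p) for the regression losses.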
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
formath/mxnet | example/svm_mnist/svm_mnist.py | 44 | 4094 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
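# (Added note: here "L2-SVM" refers to the squared hinge loss,
#  sum_i max(0, 1 - y_i * f(x_i))**2, while the L1-SVM variant selected via
#  use_linear=True uses the plain hinge max(0, 1 - y_i * f(x_i)).)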
# Now we fetch the MNIST dataset and add some noise, as the article suggests,
# then permute and assign the examples to be used on our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input and separate train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name='svm_label')
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name='svm_label')
# Here we instantiate and fit the model on our data.
# The article actually suggests using 400 epochs,
# but I reduced it to 10 for convenience
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how well the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
harisbal/pandas | pandas/tests/io/formats/test_eng_formatting.py | 22 | 8085 | import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.compat import u
import pandas.io.formats.format as fmt
from pandas.util import testing as tm
class TestEngFormatter(object):
def test_eng_float_formatter(self):
df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
fmt.set_eng_float_format()
result = df.to_string()
expected = (' A\n'
'0 1.410E+00\n'
'1 141.000E+00\n'
'2 14.100E+03\n'
'3 1.410E+06')
assert result == expected
fmt.set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = (' A\n'
'0 1.410\n'
'1 141.000\n'
'2 14.100k\n'
'3 1.410M')
assert result == expected
fmt.set_eng_float_format(accuracy=0)
result = df.to_string()
expected = (' A\n'
'0 1E+00\n'
'1 141E+00\n'
'2 14E+03\n'
'3 1E+06')
assert result == expected
tm.reset_display_options()
def compare(self, formatter, input, output):
formatted_input = formatter(input)
assert formatted_input == output
def compare_all(self, formatter, in_out):
"""
        Parameters
        ----------
        formatter : EngFormatter
            The EngFormatter instance under test.
        in_out : list of tuples
            Each tuple is ``(number, expected_formatting)``; it is checked that
            ``formatter(number) == expected_formatting``. *number* should be
            >= 0 because ``formatter(-number)`` is also tested, against the
            negated form derived from *expected_formatting*.
"""
for input, output in in_out:
self.compare(formatter, input, output)
self.compare(formatter, -input, "-" + output[1:])
def test_exponents_with_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [
(f * 10 ** -24, " 1.414y"), (f * 10 ** -23, " 14.142y"),
(f * 10 ** -22, " 141.421y"), (f * 10 ** -21, " 1.414z"),
(f * 10 ** -20, " 14.142z"), (f * 10 ** -19, " 141.421z"),
(f * 10 ** -18, " 1.414a"), (f * 10 ** -17, " 14.142a"),
(f * 10 ** -16, " 141.421a"), (f * 10 ** -15, " 1.414f"),
(f * 10 ** -14, " 14.142f"), (f * 10 ** -13, " 141.421f"),
(f * 10 ** -12, " 1.414p"), (f * 10 ** -11, " 14.142p"),
(f * 10 ** -10, " 141.421p"), (f * 10 ** -9, " 1.414n"),
(f * 10 ** -8, " 14.142n"), (f * 10 ** -7, " 141.421n"),
(f * 10 ** -6, " 1.414u"), (f * 10 ** -5, " 14.142u"),
(f * 10 ** -4, " 141.421u"), (f * 10 ** -3, " 1.414m"),
(f * 10 ** -2, " 14.142m"), (f * 10 ** -1, " 141.421m"),
(f * 10 ** 0, " 1.414"), (f * 10 ** 1, " 14.142"),
(f * 10 ** 2, " 141.421"), (f * 10 ** 3, " 1.414k"),
(f * 10 ** 4, " 14.142k"), (f * 10 ** 5, " 141.421k"),
(f * 10 ** 6, " 1.414M"), (f * 10 ** 7, " 14.142M"),
(f * 10 ** 8, " 141.421M"), (f * 10 ** 9, " 1.414G"),
(f * 10 ** 10, " 14.142G"), (f * 10 ** 11, " 141.421G"),
(f * 10 ** 12, " 1.414T"), (f * 10 ** 13, " 14.142T"),
(f * 10 ** 14, " 141.421T"), (f * 10 ** 15, " 1.414P"),
(f * 10 ** 16, " 14.142P"), (f * 10 ** 17, " 141.421P"),
(f * 10 ** 18, " 1.414E"), (f * 10 ** 19, " 14.142E"),
(f * 10 ** 20, " 141.421E"), (f * 10 ** 21, " 1.414Z"),
(f * 10 ** 22, " 14.142Z"), (f * 10 ** 23, " 141.421Z"),
(f * 10 ** 24, " 1.414Y"), (f * 10 ** 25, " 14.142Y"),
(f * 10 ** 26, " 141.421Y")]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [
(f * 10 ** -24, " 3.1416E-24"),
(f * 10 ** -23, " 31.4159E-24"),
(f * 10 ** -22, " 314.1593E-24"),
(f * 10 ** -21, " 3.1416E-21"),
(f * 10 ** -20, " 31.4159E-21"),
(f * 10 ** -19, " 314.1593E-21"),
(f * 10 ** -18, " 3.1416E-18"),
(f * 10 ** -17, " 31.4159E-18"),
(f * 10 ** -16, " 314.1593E-18"),
(f * 10 ** -15, " 3.1416E-15"),
(f * 10 ** -14, " 31.4159E-15"),
(f * 10 ** -13, " 314.1593E-15"),
(f * 10 ** -12, " 3.1416E-12"),
(f * 10 ** -11, " 31.4159E-12"),
(f * 10 ** -10, " 314.1593E-12"),
(f * 10 ** -9, " 3.1416E-09"),
(f * 10 ** -8, " 31.4159E-09"),
(f * 10 ** -7, " 314.1593E-09"),
(f * 10 ** -6, " 3.1416E-06"),
(f * 10 ** -5, " 31.4159E-06"),
(f * 10 ** -4, " 314.1593E-06"),
(f * 10 ** -3, " 3.1416E-03"),
(f * 10 ** -2, " 31.4159E-03"),
(f * 10 ** -1, " 314.1593E-03"),
(f * 10 ** 0, " 3.1416E+00"),
(f * 10 ** 1, " 31.4159E+00"),
(f * 10 ** 2, " 314.1593E+00"),
(f * 10 ** 3, " 3.1416E+03"),
(f * 10 ** 4, " 31.4159E+03"),
(f * 10 ** 5, " 314.1593E+03"),
(f * 10 ** 6, " 3.1416E+06"),
(f * 10 ** 7, " 31.4159E+06"),
(f * 10 ** 8, " 314.1593E+06"),
(f * 10 ** 9, " 3.1416E+09"),
(f * 10 ** 10, " 31.4159E+09"),
(f * 10 ** 11, " 314.1593E+09"),
(f * 10 ** 12, " 3.1416E+12"),
(f * 10 ** 13, " 31.4159E+12"),
(f * 10 ** 14, " 314.1593E+12"),
(f * 10 ** 15, " 3.1416E+15"),
(f * 10 ** 16, " 31.4159E+15"),
(f * 10 ** 17, " 314.1593E+15"),
(f * 10 ** 18, " 3.1416E+18"),
(f * 10 ** 19, " 31.4159E+18"),
(f * 10 ** 20, " 314.1593E+18"),
(f * 10 ** 21, " 3.1416E+21"),
(f * 10 ** 22, " 31.4159E+21"),
(f * 10 ** 23, " 314.1593E+21"),
(f * 10 ** 24, " 3.1416E+24"),
(f * 10 ** 25, " 31.4159E+24"),
(f * 10 ** 26, " 314.1593E+24")]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [(5.55555, ' 5.556'), (55.5555, ' 55.556'),
(555.555, ' 555.555'), (5555.55, ' 5.556k'),
(55555.5, ' 55.556k'), (555555, ' 555.555k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [(5.55555, ' 5.6'), (55.5555, ' 55.6'), (555.555, ' 555.6'),
(5555.55, ' 5.6k'), (55555.5, ' 55.6k'), (555555, ' 555.6k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [(5.55555, ' 6'), (55.5555, ' 56'), (555.555, ' 556'),
(5555.55, ' 6k'), (55555.5, ' 56k'), (555555, ' 556k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
assert result == u(' 0.000')
def test_nan(self):
# Issue #11981
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.nan)
assert result == u('NaN')
df = pd.DataFrame({'a': [1.5, 10.3, 20.5],
'b': [50.3, 60.67, 70.12],
'c': [100.2, 101.33, 120.33]})
pt = df.pivot_table(values='a', index='b', columns='c')
fmt.set_eng_float_format(accuracy=1)
result = pt.to_string()
assert 'NaN' in result
tm.reset_display_options()
def test_inf(self):
# Issue #11981
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.inf)
assert result == u('inf')
| bsd-3-clause |
pratapvardhan/pandas | pandas/core/indexes/frozen.py | 20 | 4619 | """
frozen (immutable) data structures to support MultiIndexing
These are used for:
- .names (FrozenList)
- .levels & .labels (FrozenNDArray)
"""
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.io.formats.printing import pprint_thing
class FrozenList(PandasObject, list):
"""
    Container that doesn't allow setting items, *but* which, unlike a plain
    list, defines ``__hash__`` (over a tuple of its contents) so that it can
    be used for lookups, as a dictionary key, etc.
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def searchsorted(self, v, side='left', sorter=None):
"""
Find indices where elements of v should be inserted
in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""
# we are much more performant if the searched
# indexer is the same type as the array
# this doesn't matter for int64, but DOES
# matter for smaller int dtypes
# https://github.com/numpy/numpy/issues/5370
try:
v = self.dtype.type(v)
except:
pass
return super(FrozenNDArray, self).searchsorted(
v, side=side, sorter=sorter)
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_pack.py | 15 | 4819 | #!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import struct
from pandas import compat
from pandas.compat import u, OrderedDict
from pandas.msgpack import packb, unpackb, Unpacker, Packer
class TestPack(unittest.TestCase):
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack(self):
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536,
-1, -32, -33, -128, -129, -32768, -32769,
1.0,
b"", b"a", b"a"*31, b"a"*32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1<<23),
]
for td in test_data:
self.check(td)
def testPackUnicode(self):
test_data = [
u(""), u("abcd"), [u("defgh")], u("Русский текст"),
]
for td in test_data:
re = unpackb(packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
assert re == td
packer = Packer(encoding='utf-8')
data = packer.pack(td)
re = Unpacker(compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
assert re == td
def testPackUTF32(self):
test_data = [
compat.u(""),
compat.u("abcd"),
[compat.u("defgh")],
compat.u("Русский текст"),
]
for td in test_data:
re = unpackb(packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
def testPackBytes(self):
test_data = [
b"", b"abcd", (b"defgh",),
]
for td in test_data:
self.check(td)
def testIgnoreUnicodeErrors(self):
re = unpackb(packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore', use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack(self):
self.assertRaises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'), encoding='utf-8', use_list=1)
def testStrictUnicodePack(self):
self.assertRaises(UnicodeEncodeError, packb, compat.u("abc\xeddef"), encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(self):
re = unpackb(packb(compat.u("abcФФФdef"), encoding='ascii', unicode_errors='ignore'), encoding='utf-8', use_list=1)
assert re == compat.u("abcdef")
def testNoEncoding(self):
self.assertRaises(TypeError, packb, compat.u("abc"), encoding=None)
def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat(self):
assert packb(1.0, use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
assert packb(1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
def testArraySize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(self, sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = compat.BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == dict((i, i * 2) for i in range(size))
def test_odict(self):
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist(self):
pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
packer = Packer()
packed = packer.pack_map_pairs(pairlist)
unpacked = unpackb(packed, object_pairs_hook=list)
assert pairlist == unpacked
| gpl-2.0 |
CallaJun/hackprince | indico/matplotlib/font_manager.py | 10 | 47415 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig` on Unix
variant platforms (Linux, OS X, Solaris). To enable it, set the
constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has
the advantage that it is the standard way to look up fonts on X11
platforms, so if a font is installed, it is much more likely to be
found.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import cPickle as pickle
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, warnings
try:
set
except NameError:
from sets import Set as set
from collections import Iterable
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_cachedir
from matplotlib.cbook import is_string_like
import matplotlib.cbook as cbook
from matplotlib.compat import subprocess
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'sans serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
"/usr/X11/lib/X11/fonts",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/",
# fonts installed via MacPorts
"/opt/local/share/fonts"
""
]
if not USE_FONTCONFIG and sys.platform != 'win32':
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for
    the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
def list_fonts(directory, extensions):
"""
Return a list of all fonts matching any of the extensions,
possibly upper-cased, found recursively under the directory.
"""
pattern = ';'.join(['*.%s;*.%s' % (ext, ext.upper())
for ext in extensions])
return cbook.listFiles(directory, pattern)
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
from six.moves import winreg
except ImportError:
pass # Fall through to default
else:
try:
user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
    filenames is returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
from six.moves import winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
return list_fonts(directory, fontext)
try:
for j in range(winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = winreg.EnumValue( local, j)
if not is_string_like(direc):
continue
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
except MemoryError:
continue
return list(six.iterkeys(items))
finally:
winreg.CloseKey(local)
return None
def OSXInstalledFonts(directories=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directories is None:
directories = OSXFontDirectories
fontext = get_fontext_synonyms(fontext)
files = []
for path in directories:
if fontext is None:
files.extend(cbook.listFiles(path, '*'))
else:
files.extend(list_fonts(path, fontext))
return files
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
    applications, without needing to know where all of them reside.
"""
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
try:
pipe = subprocess.Popen(['fc-list', '--format=%{file}\\n'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
# Calling fc-list did not work, so we'll just return nothing
return fontfiles
if pipe.returncode == 0:
# The line breaks between results are in ascii, but each entry
        # is in sys.getfilesystemencoding().
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
    available.  A list of TrueType fonts is returned by default, with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = X11FontDirectories
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, six.string_types):
fontpaths = [fontpaths]
for path in fontpaths:
files = list_fonts(path, fontexts)
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in six.iterkeys(fontfiles) if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, six.string_types):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError('weight not a valid integer')
return weight
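# For example (per weight_dict above): weight_as_number('bold') == 700,
# weight_as_number('regular') == 400, an unrecognised string falls back to 400,
# and an integer weight must already be one of 100, 200, ..., 900.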
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def __repr__(self):
return "<Font '%s' (%s) %s %s %s %s>" % (
self.name, os.path.basename(self.fname), self.style, self.variant,
self.weight, self.stretch)
def ttfFontProperty(font):
"""
    A function for populating a :class:`FontEntry` instance by extracting
    information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.decode('macroman').lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.decode('macroman').lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in six.iterkeys(weight_dict):
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
    extracting information from the AFM font file.
    *font* is a :class:`AFM` instance.
"""
name = font.get_familyname()
fontname = font.get_fontname().lower()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if fontname.find('narrow') >= 0 or fontname.find('condensed') >= 0 or \
fontname.find('cond') >= 0:
stretch = 'condensed'
elif fontname.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif fontname.find('wide') >= 0 or fontname.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'rb')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
try:
prop = afmFontProperty(fpath, font)
except KeyError:
continue
else:
try:
font = ft2font.FT2Font(fpath)
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try:
prop = ttfFontProperty(font)
except KeyError:
continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
    - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g., 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g., 'large', instead of absolute font sizes, e.g., 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = (tuple(self.get_family()),
self.get_slant(),
self.get_variant(),
self.get_weight(),
self.get_stretch(),
self.get_size_in_points(),
self.get_file())
return hash(l)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(findfont(self)).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
        Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = FontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', a real font name or a list of real
font names. Real font names are not supported when
`text.usetex` is `True`.
"""
if family is None:
family = rcParams['font.family']
if is_string_like(family):
family = [six.text_type(family)]
elif (not is_string_like(family) and isinstance(family, Iterable)):
family = [six.text_type(f) for f in family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style is None:
style = rcParams['font.style']
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant is None:
variant = rcParams['font.variant']
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is None:
weight = rcParams['font.weight']
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is None:
stretch = rcParams['font.stretch']
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g., 12.
"""
if size is None:
size = rcParams['font.size']
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in six.iteritems(self._parse_fontconfig_pattern(pattern)):
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
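# Illustrative sketch (added for exposition, not part of the original module):
# FontProperties can be built either from keyword arguments or from a single
# fontconfig-pattern string. The helper name below is ours, not matplotlib's,
# and it is wrapped in a function so nothing runs at import time.
def _example_fontproperties_usage():
    kw_prop = FontProperties(family='serif', style='italic', weight='bold',
                             size='large')
    # A single string argument is parsed as a fontconfig pattern; 'bold' is
    # assumed to be one of the standard fontconfig property constants.
    pattern_prop = FontProperties('serif:bold')
    return kw_prop.get_fontconfig_pattern(), pattern_prop.get_weight()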
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in six.itervalues(d):
for styled in six.itervalues(named):
for variantd in six.itervalues(styled):
for weightd in six.itervalues(variantd):
for stretchd in six.itervalues(weightd):
for fname in six.itervalues(stretchd):
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
    Equivalent to pickle.dump(data, open(filename, 'wb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'wb') as fh:
pickle.dump(data, fh)
def pickle_load(filename):
"""
    Equivalent to pickle.load(open(filename, 'rb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'rb') as fh:
data = pickle.load(fh)
return data
class TempCache(object):
"""
A class to store temporary caches that are (a) not saved to disk
and (b) invalidated whenever certain font-related
rcParams---namely the family lookup lists---are changed or the
font cache is reloaded. This avoids the expensive linear search
through all fonts every time a font is looked up.
"""
    # A list of rcParam names that, when changed, invalidate this
# cache.
invalidating_rcparams = (
'font.serif', 'font.sans-serif', 'font.cursive', 'font.fantasy',
'font.monospace')
def __init__(self):
self._lookup_cache = {}
self._last_rcParams = self.make_rcparams_key()
def make_rcparams_key(self):
return [id(fontManager)] + [
rcParams[param] for param in self.invalidating_rcparams]
def get(self, prop):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
return self._lookup_cache.get(prop)
def set(self, prop, value):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
self._lookup_cache[prop] = value
class FontManager(object):
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
# Increment this version number whenever the font cache data
    # format or behavior has changed and requires existing font
# cache files to be rebuilt.
__version__ = 101
def __init__(self, size=None, weight='normal'):
self._version = self.__version__
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm'),
os.path.join(rcParams['datapath'], 'fonts', 'pdfcorefonts')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
self.defaultFamily = {
'ttf': 'Bitstream Vera Sans',
'afm': 'Helvetica'}
self.defaultFont = {}
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont['ttf'] = fname
break
else:
# use anything
self.defaultFont['ttf'] = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
if len(self.afmfiles):
self.defaultFont['afm'] = self.afmfiles[0]
else:
self.defaultFont['afm'] = None
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
@staticmethod
def get_default_size():
"""
Return the default font size.
"""
return rcParams['font.size']
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
        An exact match at the head of the list returns 0.0; exact matches
        further down the list are penalized in proportion to their position.
        A match via a generic font name returns at most 0.1.
        No match returns 1.0.
"""
if not isinstance(families, (list, tuple)):
families = [families]
family2 = family2.lower()
for i, family1 in enumerate(families):
family1 = family1.lower()
if family1 in font_family_aliases:
if family1 in ('sans', 'sans serif'):
family1 = 'sans-serif'
options = rcParams['font.' + family1]
options = [x.lower() for x in options]
if family2 in options:
idx = options.index(family2)
return ((0.1 * (idx / len(options))) *
((i + 1) / len(families)))
elif family1 == family2:
# The score should be weighted by where in the
# list the font was found.
return i / len(families)
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # Size values should already be numeric at this point; if a relative
        # size name (e.g. 'large') slips through, map it via font_scalings.
        try:
            sizeval1 = float(size1)
        except ValueError:
            default_size = self.default_size or self.get_default_size()
            sizeval1 = default_size * font_scalings.get(size1, 1.0)
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
        properties. The first font with the best (lowest) score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
        If `directory` is specified, only fonts from the given directory
        (or a subdirectory of that directory) are returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
If `fallback_to_default` is True, will fallback to the default
font family (usually "Bitstream Vera Sans" or "Helvetica") if
the first lookup hard-fails.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
if not isinstance(prop, FontProperties):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
fontlist = self.afmlist
else:
fontlist = self.ttflist
if directory is None:
cached = _lookup_cache[fontext].get(prop)
if cached is not None:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
if (directory is not None and
os.path.commonprefix([font.fname, directory]) != directory):
continue
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
if fallback_to_default:
warnings.warn(
'findfont: Font family %s not found. Falling back to %s' %
(prop.get_family(), self.defaultFamily[fontext]))
default_prop = prop.copy()
default_prop.set_family(self.defaultFamily[fontext])
return self.findfont(default_prop, fontext, directory, False)
else:
# This is a hard fail -- we can't find anything reasonable,
# so just return the vera.ttf
warnings.warn(
'findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont[fontext]),
UserWarning)
result = self.defaultFont[fontext]
else:
verbose.report(
'findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, repr(best_font.fname), best_score))
result = best_font.fname
if not os.path.isfile(result):
if rebuild_if_missing:
verbose.report(
'findfont: Found a missing font file. Rebuilding cache.')
_rebuild()
return fontManager.findfont(
prop, fontext, directory, True, False)
else:
raise ValueError("No valid font could be found")
if directory is None:
_lookup_cache[fontext].set(prop, result)
return result
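# Illustrative sketch (added for exposition, not part of the original module):
# the usual entry point is the module-level findfont() helper defined further
# below, which delegates to FontManager.findfont() and caches the result.
# The helper name here is ours; it is wrapped in a function so nothing runs
# at import time.
def _example_findfont_usage():
    prop = FontProperties(family='sans-serif', weight='bold')
    # Returns the absolute path of the best-scoring font file, falling back
    # to the default family if no good match is found.
    return findfont(prop)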
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
with open(filename, 'rb') as fd:
tag = fd.read(4)
            result = (tag == b'OTTO')  # bytes literal so the check also works on Python 3
_is_opentype_cff_font_cache[filename] = result
return result
return False
fontManager = None
_fmcache = None
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', pattern],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
return None
# The bulk of the output from fc-list is ascii, so we keep the
# result in bytes and parse it as bytes, until we extract the
# filename, which is in sys.filesystemencoding().
if pipe.returncode == 0:
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if os.path.splitext(fname)[1][1:] in fontexts:
return fname
return None
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = None
    if 'TRAVIS' not in os.environ:
cachedir = get_cachedir()
if cachedir is not None:
if six.PY3:
_fmcache = os.path.join(cachedir, 'fontList.py3k.cache')
else:
_fmcache = os.path.join(cachedir, 'fontList.cache')
fontManager = None
_lookup_cache = {
'ttf': TempCache(),
'afm': TempCache()
}
def _rebuild():
global fontManager
fontManager = FontManager()
if _fmcache:
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
if _fmcache:
try:
fontManager = pickle_load(_fmcache)
if (not hasattr(fontManager, '_version') or
fontManager._version != FontManager.__version__):
_rebuild()
else:
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
else:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
return font
| lgpl-3.0 |
ThomasMiconi/htmresearch | projects/sequence_prediction/mackey_glass/nupic_output.py | 13 | 6035 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'y', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "out_%s" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
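# Illustrative sketch (added for exposition, not part of the original file):
# minimal use of NuPICFileOutput -- one output stream named "example"
# (written to the file out_example), three timestamped actual/predicted
# pairs, then close(). All values below are made up; the helper is wrapped
# in a function so nothing runs at import time.
def _exampleFileOutputUsage():
    from datetime import datetime, timedelta
    output = NuPICFileOutput(["example"])
    start = datetime(2014, 1, 1)
    for i in range(3):
        output.write([start + timedelta(minutes=i)], [float(i)], [float(i) + 0.5])
    output.close()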
| agpl-3.0 |
MartinSavc/scikit-learn | sklearn/datasets/samples_generator.py | 103 | 56423 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
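# Illustrative sketch (added for exposition, not part of the original module):
# a small, fully-specified call to make_classification; all parameter values
# below are arbitrary and only chosen to satisfy the constraints documented
# above (informative + redundant + repeated <= n_features and
# 2 ** n_informative >= n_classes * n_clusters_per_class).
def _example_make_classification():
    X, y = make_classification(n_samples=30, n_features=6, n_informative=3,
                               n_redundant=1, n_classes=3,
                               n_clusters_per_class=1, random_state=0)
    assert X.shape == (30, 6) and y.shape == (30,)
    return X, y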
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
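# Illustrative sketch (added for exposition, not part of the original module):
# draws a small multilabel problem with the default dense indicator output;
# the parameter values are arbitrary.
def _example_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=20, n_features=15,
                                          n_classes=4, n_labels=2,
                                          random_state=0)
    assert X.shape == (20, 15) and Y.shape == (20, 4)
    return X, Y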
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
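# Illustrative sketch (added for exposition, not part of the original module):
# the Hastie et al. Example 10.2 data with a small sample count; labels are
# +/- 1 according to whether the squared norm of each row exceeds 9.34.
def _example_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert X.shape == (100, 10) and y.shape == (100,)
    return X, y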
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
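# Illustrative sketch (added for exposition, not part of the original module):
# a small regression problem that also returns the underlying linear-model
# coefficients; the noise level and shapes below are arbitrary.
def _example_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=8, n_informative=3,
                                 noise=0.1, coef=True, random_state=0)
    assert X.shape == (50, 8) and y.shape == (50,) and coef.shape == (8,)
    return X, y, coef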
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
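    Examples
    --------
    A minimal usage sketch; only the output shape is checked, since the
    entries depend on the random seed.
    >>> from sklearn.datasets import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> X.shape
    (50, 25)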
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
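    Examples
    --------
    A minimal usage sketch illustrating the returned shapes and the
    per-column sparsity of the code.
    >>> from sklearn.datasets import make_sparse_coded_signal
    >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=8,
    ...                                    n_features=5, n_nonzero_coefs=3,
    ...                                    random_state=0)
    >>> Y.shape, D.shape, X.shape
    ((5, 10), (5, 8), (8, 10))
    >>> int((X[:, 0] != 0).sum())
    3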
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
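    Examples
    --------
    A minimal usage sketch; positive definiteness is checked through the
    eigenvalues.
    >>> import numpy as np
    >>> from sklearn.datasets import make_spd_matrix
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> X.shape
    (3, 3)
    >>> bool(np.all(np.linalg.eigvalsh(X) > 0))
    True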
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
    alpha: float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
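    Examples
    --------
    A minimal usage sketch; only output shapes are shown.
    >>> from sklearn.datasets import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
    >>> X.shape
    (100, 3)
    >>> t.shape
    (100,)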
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
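    Examples
    --------
    A minimal usage sketch; with 90 samples and 3 classes, each class
    receives exactly 30 samples.
    >>> import numpy as np
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
    ...                                n_classes=3, random_state=0)
    >>> X.shape
    (90, 2)
    >>> [int(c) for c in np.bincount(y)]
    [30, 30, 30]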
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
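    Examples
    --------
    A minimal usage sketch; ``rows`` and ``cols`` hold one boolean
    membership row per bicluster.
    >>> from sklearn.datasets import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
    ...                                 random_state=0)
    >>> X.shape
    (30, 20)
    >>> rows.shape, cols.shape
    ((3, 30), (3, 20))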
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/utils/validation.py | 2 | 27135 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .. import get_config as _get_config
from ..exceptions import NonBLASDotWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if _get_config()['assume_finite']:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : array or sparse matrix
"""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError("Found input variables with inconsistent numbers of"
" samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : string, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : boolean
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if isinstance(accept_sparse, six.string_types):
accept_sparse = [accept_sparse]
if accept_sparse is False:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError("When providing 'accept_sparse' "
"as a tuple or list, it must contain at "
"least one string value.")
# ensure correct sparse format
if spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError("Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided "
"'accept_sparse={}'.".format(accept_sparse))
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, boolean or list/tuple of strings (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to raise a value error if X is not 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
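    Examples
    --------
    A minimal usage sketch; a nested list is validated and returned as a
    2D ndarray.
    >>> from sklearn.utils import check_array
    >>> X = check_array([[1, 2, 3], [4, 5, 6]])
    >>> X.shape
    (2, 3)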
"""
# accept_sparse 'None' deprecation check
if accept_sparse is None:
warnings.warn(
"Passing 'None' to parameter 'accept_sparse' in methods "
"check_array and check_X_y is deprecated in version 0.19 "
"and will be removed in 0.21. Use 'accept_sparse=False' "
" instead.", DeprecationWarning)
accept_sparse = False
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
raise ValueError(
"Expected 2D array, got 1D array instead: \narray={}\n "
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array))
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning)
return array
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, boolean or list of string (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
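    Examples
    --------
    A minimal usage sketch; X is validated as 2D and y as a 1D vector of
    matching length.
    >>> from sklearn.utils import check_X_y
    >>> X, y = check_X_y([[1, 2], [3, 4], [5, 6]], [0, 1, 0])
    >>> X.shape, y.shape
    ((3, 2), (3,))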
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
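    Examples
    --------
    A minimal usage sketch; a single-column 2D array is flattened to 1D.
    >>> import numpy as np
    >>> from sklearn.utils.validation import column_or_1d
    >>> column_or_1d(np.array([[1], [2], [3]])).shape
    (3,)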
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
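    Examples
    --------
    A minimal usage sketch; an int seed yields a fresh RandomState, while an
    existing RandomState is returned unchanged.
    >>> import numpy as np
    >>> from sklearn.utils import check_random_state
    >>> rng = check_random_state(0)
    >>> isinstance(rng, np.random.RandomState)
    True
    >>> check_random_state(rng) is rng
    True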
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter: str
The searched parameter.
Returns
-------
is_parameter: bool
        Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
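    Examples
    --------
    A minimal usage sketch, assuming a scikit-learn estimator such as
    LinearRegression; the call returns None once the attribute is present.
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.utils.validation import check_is_fitted
    >>> est = LinearRegression().fit([[0], [1], [2]], [0, 1, 2])
    >>> check_is_fitted(est, 'coef_')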
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
dkillick/cartopy | lib/cartopy/tests/mpl/test_axes.py | 2 | 2979 | # (C) British Crown Copyright 2011 - 2015, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import unittest
from matplotlib.testing.decorators import cleanup
import matplotlib.path as mpath
import matplotlib.pyplot as plt
from nose.tools import assert_equal
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import InterProjectionTransform
from .test_caching import CallCounter
class TestNoSpherical(unittest.TestCase):
def setUp(self):
self.ax = plt.axes(projection=ccrs.PlateCarree())
self.data = np.arange(12).reshape((3, 4))
def tearDown(self):
plt.clf()
plt.close()
def test_contour(self):
with self.assertRaises(ValueError):
self.ax.contour(self.data, transform=ccrs.Geodetic())
def test_contourf(self):
with self.assertRaises(ValueError):
self.ax.contourf(self.data, transform=ccrs.Geodetic())
def test_pcolor(self):
with self.assertRaises(ValueError):
self.ax.pcolor(self.data, transform=ccrs.Geodetic())
def test_pcolormesh(self):
with self.assertRaises(ValueError):
self.ax.pcolormesh(self.data, transform=ccrs.Geodetic())
def test_transform_PlateCarree_shortcut():
src = ccrs.PlateCarree(central_longitude=0)
target = ccrs.PlateCarree(central_longitude=180)
# of the 3 paths, 2 of them cannot be short-cutted.
pth1 = mpath.Path([[0.5, 0], [10, 10]])
pth2 = mpath.Path([[0.5, 91], [10, 10]])
pth3 = mpath.Path([[-0.5, 0], [10, 10]])
trans = InterProjectionTransform(src, target)
counter = CallCounter(target, 'project_geometry')
with counter:
trans.transform_path(pth1)
# pth1 should allow a short-cut.
assert_equal(counter.count, 0)
with counter:
trans.transform_path(pth2)
assert_equal(counter.count, 1)
with counter:
trans.transform_path(pth3)
assert_equal(counter.count, 2)
@cleanup
def test_geoaxes_subplot():
ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())
assert_equal(str(ax.__class__),
"<class 'cartopy.mpl.geoaxes.GeoAxesSubplot'>")
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/dataframe/tests/test_ufunc.py | 4 | 15369 | from __future__ import absolute_import, division, print_function
import pytest
pd = pytest.importorskip('pandas')
import pandas.util.testing as tm
import numpy as np
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq
_BASE_UFUNCS = ['conj', 'exp', 'log', 'log2', 'log10', 'log1p',
'expm1', 'sqrt', 'square', 'sin', 'cos', 'tan',
                'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh',
'arcsinh', 'arccosh', 'arctanh', 'deg2rad', 'rad2deg',
'isfinite', 'isinf', 'isnan', 'signbit',
'degrees', 'radians', 'rint', 'fabs', 'sign', 'absolute',
'floor', 'ceil', 'trunc', 'logical_not', 'cbrt', 'exp2',
'negative', 'reciprocal', 'spacing']
@pytest.mark.parametrize('pandas_input', [
pd.Series(np.random.randint(1, 100, size=20)),
pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))}),
pd.Series(np.random.randint(1, 100, size=20),
index=list('abcdefghijklmnopqrst')),
pd.Series(np.abs(np.random.randn(20)),
index=list('abcdefghijklmnopqrst')),
pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))},
index=list('abcdefghijklmnopqrst'))])
@pytest.mark.parametrize('ufunc', _BASE_UFUNCS)
def test_ufunc(pandas_input, ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
dask_input = dd.from_pandas(pandas_input, 3)
pandas_type = pandas_input.__class__
dask_type = dask_input.__class__
# applying Dask ufunc doesn't trigger computation
with pytest.warns(None):
# Some cause warnings (arcsine)
assert isinstance(dafunc(dask_input), dask_type)
assert_eq(dafunc(dask_input), npfunc(pandas_input))
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc) and np.__version__ >= '1.13.0':
assert isinstance(npfunc(dask_input), dask_type)
else:
assert isinstance(npfunc(dask_input), pandas_type)
assert_eq(npfunc(dask_input), npfunc(pandas_input))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(pandas_input), pandas_type)
assert_eq(dafunc(dask_input), npfunc(pandas_input))
# Index
if pandas_input.index.dtype in [object, str]:
return
if ufunc in ('logical_not', 'signbit', 'isnan', 'isinf', 'isfinite'):
return
with pytest.warns(None):
assert isinstance(dafunc(dask_input.index), dd.Index)
assert_eq(dafunc(dask_input.index), npfunc(pandas_input.index))
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc) and np.__version__ >= '1.13.0':
assert isinstance(npfunc(dask_input.index), dd.Index)
else:
assert isinstance(npfunc(dask_input.index), pd.Index)
    assert_eq(npfunc(dask_input.index), npfunc(pandas_input.index))
# applying Dask ufunc to normal Series triggers computation
with pytest.warns(None):
# some (da.log) cause warnings
assert isinstance(dafunc(pandas_input.index), pd.Index)
assert_eq(dafunc(pandas_input), npfunc(pandas_input))
@pytest.mark.parametrize('ufunc', ['isreal', 'iscomplex', 'real', 'imag',
'angle', 'fix', 'i0', 'sinc', 'nan_to_num'])
def test_ufunc_array_wrap(ufunc):
"""
some np.ufuncs doesn't call __array_wrap__
(or __array_ufunc__ starting from numpy v.1.13.0), it should work as below
- da.ufunc(dd.Series) => dd.Series
- da.ufunc(pd.Series) => np.ndarray
- np.ufunc(dd.Series) => np.ndarray
- np.ufunc(pd.Series) => np.ndarray
"""
if ufunc == 'fix' and np.__version__ >= '1.13.0':
pytest.skip('fix calls floor in a way that we do not yet support')
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s = pd.Series(np.random.randint(1, 100, size=20),
index=list('abcdefghijklmnopqrst'))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), pd.Series(npfunc(s), index=s.index))
assert isinstance(npfunc(ds), np.ndarray)
tm.assert_numpy_array_equal(npfunc(ds), npfunc(s))
assert isinstance(dafunc(s), np.ndarray)
tm.assert_numpy_array_equal(dafunc(s), npfunc(s))
df = pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))},
index=list('abcdefghijklmnopqrst'))
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf), dd.DataFrame)
# result may be read-only ndarray
exp = pd.DataFrame(npfunc(df).copy(), columns=df.columns, index=df.index)
assert_eq(dafunc(ddf), exp)
assert isinstance(npfunc(ddf), np.ndarray)
tm.assert_numpy_array_equal(npfunc(ddf), npfunc(df))
assert isinstance(dafunc(df), np.ndarray)
tm.assert_numpy_array_equal(dafunc(df), npfunc(df))
_UFUNCS_2ARG = ['logaddexp', 'logaddexp2', 'arctan2',
'hypot', 'copysign', 'nextafter', 'ldexp',
'fmod', 'logical_and', 'logical_or',
'logical_xor', 'maximum', 'minimum',
'fmax', 'fmin', 'greater',
'greater_equal', 'less', 'less_equal',
'not_equal', 'equal', 'logical_or',
'logical_and', 'logical_xor']
@pytest.mark.parametrize('ufunc', _UFUNCS_2ARG)
@pytest.mark.parametrize('make_pandas_input', [
lambda: pd.Series(np.random.randint(1, 100, size=20)),
lambda: pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B'])
])
def test_ufunc_with_2args(ufunc, make_pandas_input):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
pandas1 = make_pandas_input()
pandas2 = make_pandas_input()
dask1 = dd.from_pandas(pandas1, 3)
dask2 = dd.from_pandas(pandas2, 4)
pandas_type = pandas1.__class__
dask_type = dask1.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask1, dask2), dask_type)
assert_eq(dafunc(dask1, dask2), npfunc(pandas1, pandas2))
# should be fine with pandas as a second arg, too
assert isinstance(dafunc(dask1, pandas2), dask_type)
assert_eq(dafunc(dask1, pandas2), npfunc(pandas1, pandas2))
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc) and np.__version__ >= '1.13.0':
assert isinstance(npfunc(dask1, dask2), dask_type)
assert isinstance(npfunc(dask1, pandas2), dask_type)
else:
assert isinstance(npfunc(dask1, dask2), pandas_type)
assert isinstance(npfunc(dask1, pandas2), pandas_type)
assert_eq(npfunc(dask1, dask2), npfunc(pandas1, pandas2))
assert_eq(npfunc(dask1, pandas2), npfunc(pandas1, pandas2))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(pandas1, pandas2), pandas_type)
assert_eq(dafunc(pandas1, pandas2), npfunc(pandas1, pandas2))
@pytest.mark.parametrize('pandas,min,max', [
(pd.Series(np.random.randint(1, 100, size=20)), 5, 50),
(pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B']), 5.5, 40.5)
])
def test_clip(pandas, min, max):
dask = dd.from_pandas(pandas, 3)
pandas_type = pandas.__class__
dask_type = dask.__class__
# clip internally calls dd.Series.clip
# applying Dask ufunc doesn't trigger computation
assert isinstance(da.clip(dask, min, max), dask_type)
assert_eq(da.clip(dask, min, max), np.clip(pandas, min, max))
# applying Numpy ufunc doesn't trigger computation
assert isinstance(np.clip(dask, min, max), dask_type)
assert_eq(np.clip(dask, min, max), np.clip(pandas, min, max))
# applying Dask ufunc to normal pandas objects triggers computation
assert isinstance(da.clip(pandas, min, max), pandas_type)
assert_eq(da.clip(pandas, min, max), np.clip(pandas, min, max))
@pytest.mark.skipif(np.__version__ < '1.13.0', reason='array_ufunc not present')
@pytest.mark.parametrize('ufunc', _BASE_UFUNCS)
def test_frame_ufunc_out(ufunc):
npfunc = getattr(np, ufunc)
dafunc = getattr(da, ufunc)
input_matrix = np.random.randint(1, 100, size=(20, 2))
df = pd.DataFrame(input_matrix, columns=['A', 'B'])
ddf = dd.from_pandas(df, 3)
df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['Y', 'Z'])
ddf_out_np = dd.from_pandas(df_out, 3)
ddf_out_da = dd.from_pandas(df_out, 3)
with pytest.warns(None):
npfunc(ddf, out=ddf_out_np)
dafunc(ddf, out=ddf_out_da)
assert_eq(ddf_out_np, ddf_out_da)
with pytest.warns(None):
expected = pd.DataFrame(npfunc(input_matrix), columns=['A', 'B'])
assert_eq(ddf_out_np, expected)
@pytest.mark.skipif(np.__version__ < '1.13.0', reason='array_ufunc not present')
def test_frame_2ufunc_out():
input_matrix = np.random.randint(1, 100, size=(20, 2))
df = pd.DataFrame(input_matrix, columns=['A', 'B'])
ddf = dd.from_pandas(df, 3)
# column number mismatch
df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 3)),
columns=['X', 'Y', 'Z'])
ddf_out = dd.from_pandas(df_out, 3)
with pytest.raises(ValueError):
np.sin(ddf, out=ddf_out)
# types mismatch
ddf_out = dd.from_pandas(pd.Series([0]),1)
with pytest.raises(TypeError):
np.sin(ddf, out=ddf_out)
df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['X', 'Y'])
ddf_out = dd.from_pandas(df_out, 3)
np.sin(ddf, out=ddf_out)
np.add(ddf_out, 10, out=ddf_out)
expected = pd.DataFrame(np.sin(input_matrix) + 10, columns=['A', 'B'])
assert_eq(ddf_out, expected)
@pytest.mark.skipif(np.__version__ < '1.13.0', reason='array_ufunc not present')
@pytest.mark.parametrize('arg1', [
pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))})])
@pytest.mark.parametrize('arg2', [2, dd.from_pandas(pd.Series([0]), 1).sum()])
@pytest.mark.parametrize('ufunc', _UFUNCS_2ARG)
def test_mixed_types(ufunc, arg1, arg2):
npfunc = getattr(np, ufunc)
dafunc = getattr(da, ufunc)
dask = dd.from_pandas(arg1, 3)
pandas_type = arg1.__class__
dask_type = dask.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask, arg2), dask_type)
assert_eq(dafunc(dask, arg2), npfunc(dask, arg2))
# applying NumPy ufunc is lazy
assert isinstance(npfunc(dask, arg2), dask_type)
assert_eq(npfunc(dask, arg2), npfunc(arg1, arg2))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(arg1, arg2), pandas_type)
assert_eq(dafunc(arg1, arg2), npfunc(arg1, arg2))
# swapping arguments
# first parameter of ldexp should be array-like
if ufunc == 'ldexp':
return
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(arg2, dask), dask_type)
assert_eq(dafunc(arg2, dask), npfunc(arg2, dask))
# applying NumPy ufunc is lazy
assert isinstance(npfunc(arg2, dask), dask_type)
assert_eq(npfunc(arg2, dask), npfunc(arg2, dask))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(arg2, arg1), pandas_type)
assert_eq(dafunc(arg2, arg1), npfunc(arg2, arg1))
@pytest.mark.skipif(np.__version__ < '1.13.0', reason='array_ufunc not present')
@pytest.mark.parametrize('ufunc', _UFUNCS_2ARG)
@pytest.mark.parametrize('pandas,darray',
[(pd.Series(np.random.randint(1, 100, size=(100,))),
da.from_array(np.random.randint(1, 100, size=(100,)),
chunks=(50,))),
(pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B']),
da.from_array(np.random.randint(1, 100, size=(20, 2)),
chunks=(10, 2)))])
def test_2args_with_array(ufunc, pandas, darray):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
dask = dd.from_pandas(pandas, 2)
dask_type = dask.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask, darray), dask_type)
assert isinstance(dafunc(darray, dask), dask_type)
tm.assert_numpy_array_equal(dafunc(dask, darray).compute().values,
npfunc(pandas.values, darray).compute())
# applying NumPy ufunc is lazy
assert isinstance(npfunc(dask, darray), dask_type)
assert isinstance(npfunc(darray, dask), dask_type)
tm.assert_numpy_array_equal(npfunc(dask, darray).compute().values,
npfunc(pandas.values, darray.compute()))
tm.assert_numpy_array_equal(npfunc(darray, dask).compute().values,
npfunc(darray.compute(), pandas.values))
@pytest.mark.parametrize('redfunc', ['sum', 'prod', 'min', 'max', 'mean'])
@pytest.mark.parametrize('ufunc', _BASE_UFUNCS)
@pytest.mark.parametrize('pandas',
[pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))})])
def test_ufunc_with_reduction(redfunc, ufunc, pandas):
dask = dd.from_pandas(pandas, 3)
np_redfunc = getattr(np, redfunc)
np_ufunc = getattr(np, ufunc)
with pytest.warns(None):
assert isinstance(np_redfunc(dask), (dd.DataFrame, dd.Series, dd.core.Scalar))
assert_eq(np_redfunc(np_ufunc(dask)), np_redfunc(np_ufunc(pandas)))
@pytest.mark.parametrize('pandas',
[pd.Series(np.random.randint(1, 100, size=100)),
pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))})])
@pytest.mark.parametrize('scalar', [15, 16.4, np.int64(15), np.float64(16.4)])
def test_ufunc_numpy_scalar_comparison(pandas, scalar):
# Regression test for issue #3392
dask_compare = scalar >= dd.from_pandas(pandas, npartitions=3)
pandas_compare = scalar >= pandas
assert_eq(dask_compare, pandas_compare)
| gpl-3.0 |
mathemage/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_gbm.py | 6 | 9236 | from __future__ import print_function
from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def weights_check():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = H2OGradientBoostingEstimator(min_rows=20,
ntrees=5,
seed=20,
max_depth=4)
gbm1_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy",
training_frame=data1)
gbm2_regression = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
ntrees=5,
seed=20,
max_depth=4)
gbm2_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy",
training_frame=data2,
weights_column="weights")
gbm1_binomial = H2OGradientBoostingEstimator(min_rows=20,
distribution="bernoulli",
ntrees=5,
seed=20,
max_depth=4)
gbm1_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy_20mpg",
training_frame=data1)
gbm2_binomial = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
distribution="bernoulli",
ntrees=5,
seed=20,
max_depth=4)
gbm2_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy_20mpg",
training_frame=data2,
weights_column="weights")
gbm1_multinomial = H2OGradientBoostingEstimator(min_rows=20,
distribution="multinomial",
ntrees=5,
seed=20,
max_depth=4)
gbm1_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="cylinders",
training_frame=data1)
gbm2_multinomial = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
distribution="multinomial",
ntrees=5,
seed=20,
max_depth=4)
gbm2_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="cylinders",
weights_column="weights", training_frame=data2)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print("MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse))
print("AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc))
print("MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse))
assert abs(reg1_mse - reg2_mse) < 1e-5 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-3 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
assert abs(mul1_mse - mul2_mse) < 1e-6 * mul1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
weight = 3 # random.randint(1,10) # PY3 hack
uniform_weights = [[weight]] *406
h2o_uniform_weights = h2o.H2OFrame(uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print("Checking that using uniform weights is equivalent to no weights:")
print()
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0]] # [[0 if random.randint(0,1) else 1 for r in range(406)]]
h2o_zero_weights = h2o.H2OFrame(list(zip(*zero_weights)))
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print("Checking that using some zero weights is equivalent to removing those observations:")
print()
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 2, 1]] # [[1 if random.randint(0,1) else 2 for r in range(406)]]
h2o_doubled_weights = h2o.H2OFrame(list(zip(*doubled_weights)))
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights[0]):
if w == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(doubled_data)
h2o_data_doubled.set_names(list(colnames))
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print("Checking that doubling some weights is equivalent to doubling those observations:")
print()
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_check)
else:
weights_check()
| apache-2.0 |
ska-sa/montblanc | montblanc/tests/test_meq_tf.py | 1 | 16128 | """
This script runs Montblanc and MeqTrees and compares
the visibilities output by both.
To run this script you'll need a Measurement Set.
It's often easier to create this using the
`simms <https://github.com/radio-astro/simms_>`_ package.
For example, create a VLA Measurement Set, call the following:
.. code-block:: shell
simms -T vla -t ascii -cs itrf -st 1 -dt 4 -f0 1.42GHz -df 4MHz
-nc 8 -dir "J2000,5h42m36.1378s,+49d51m7.23000000001s"
-feed 'perfect R L' -pl "RR RL LR LL" -n vla_test.ms
~/paper_sims/simms/simms/observatories/vlac.itrf.txt
Then, to call the script run
.. code-block:: shell
python test_meq_tf.py vla.ms -p circular -a "overwrite_beams=True"
This tells the script to configure Montblanc+MeqTrees for circular
polarisations (the VLA telescope uses circular polarisations) and
to create new cos**3 testing beams. It is possible to also supply
your own beams for the test by specifying
``-a "beam_file_schema='my_beam_\$(corr)_\$(reim).fits'"`` for example.
Other options can be passed
-- See :func:`run_test` for more details
and options.
"""
import itertools
import os
import subprocess
import sys
import tempfile
from astropy.io import fits
import numpy as np
import pyrap.tables as pt
rf = np.random.random
from montblanc.tests.beam_factory import beam_factory
# Directory that holds MS and Beam data
DATA_DIR = 'data'
def run_test(msfile, pol_type, **kwargs):
"""
Parameters
----------
msfile : str
Name of the Measurement Set
pol_type : str
'linear' or 'circular'
beam_file_schema (optional) : str
Beam filename schema. Defaults to 'test_beam_$(corr)_$(reim).fits'
overwrite_beams (optional) : bool
If ``True`` create new beams using the cos**3 beam
"""
#=========================================
# Directory and Script Configuration
#=========================================
# Directory in which we expect our measurement set to be located
meq_vis_column = 'MODEL_DATA'
mb_vis_column = 'CORRECTED_DATA'
# Directory in which meqtree-related files are read/written
meq_dir = 'meqtrees'
# Scripts
meqpipe = 'meqtree-pipeliner.py'
# Meqtree profile and script
cfg_file = os.path.join(meq_dir, 'tdlconf.profiles')
sim_script = os.path.join(meq_dir, 'turbo-sim.py')
tigger_sky_file = os.path.join(meq_dir, 'sky_model.txt')
# Is the beam enabled
beam_on = kwargs.get('beam_on', True)
beam_on = 1 if beam_on is True else 0
# Directory in which we expect our beams to be located
beam_file_schema = 'test_beam_$(corr)_$(reim).fits'
# Beam file pattern
beam_file_schema = kwargs.get("beam_file_schema", beam_file_schema)
l_axis = kwargs.get('l_axis', '-X')
m_axis = kwargs.get('m_axis', 'Y')
# Find the location of the meqtree pipeliner script
meqpipe_actual = subprocess.check_output(['which', meqpipe]).strip()
cfg_section = '-'.join(('montblanc', 'compare', pol_type))
#======================================================
# Configure the beam files with frequencies from the MS
#======================================================
from montblanc.impl.rime.tensorflow.sources.fits_beam_source_provider import (
_create_filenames, _open_fits_files)
# Zero the visibility data
with pt.table(msfile, ack=False, readonly=False) as T:
data_desc = T.getcoldesc('DATA')
try:
shape = data_desc['shape'].tolist()
except KeyError:
shape = list(T.getcol('DATA', startrow=0, nrow=1).shape[1:])
shape = [T.nrows()] + shape
T.putcol(mb_vis_column, np.zeros(shape, dtype=np.complex64))
T.putcol(meq_vis_column, np.zeros(shape, dtype=np.complex64))
# Extract frequencies from the MS
with pt.table(msfile + '::SPECTRAL_WINDOW', ack=False) as SW:
frequency = SW.getcol('CHAN_FREQ')[0]
bandwidth = frequency[-1] - frequency[0]
overwrite_beams = kwargs.get('overwrite_beams', False)
# Get filenames from pattern and open the files
filenames = beam_factory(polarisation_type=pol_type,
frequency=frequency,
schema=beam_file_schema,
overwrite=overwrite_beams)
#=========================================
# Source Configuration
#=========================================
np.random.seed(0)
dtype = np.float64
ctype = np.complex128 if dtype == np.float64 else np.complex64
def get_point_sources(nsrc):
source_coords = np.empty(shape=(nsrc, 2), dtype=dtype)
stokes = np.empty(shape=(nsrc, 4), dtype=dtype)
I, Q, U, V = stokes[:,0], stokes[:,1], stokes[:,2], stokes[:,3]
alphas = np.empty(shape=(nsrc,), dtype=dtype)
ref_freq = np.empty(shape=(nsrc,), dtype=dtype)
# Source coordinates between -45 and 45 degrees
source_coords[:] = (rf(size=source_coords.shape) - 0.5)*90.0
Q[:] = rf(size=Q.shape)*0.1
U[:] = rf(size=U.shape)*0.1
V[:] = rf(size=V.shape)*0.1
I[:] = np.sqrt(Q**2 + U**2 + V**2)*1.5 + rf(size=I.shape)*0.1
# Zero and invert selected stokes parameters
if nsrc > 0:
zero_srcs = np.random.randint(nsrc, size=(2,))
source_coords[zero_srcs,:] = 0
# Create sources with both positive and negative flux
sign = 2*np.random.randint(2, size=I.shape) - 1
I[:] *= sign
alphas[:] = 2*(np.random.random(size=alphas.size) - 0.5)
ref_freq[:] = 1.3e9 + np.random.random(ref_freq.size)*0.2e9
return (np.deg2rad(source_coords), np.asarray(stokes),
np.asarray(alphas), np.asarray(ref_freq))
def get_gaussian_sources(nsrc):
c, s, a, r= get_point_sources(nsrc)
gauss_shape = np.empty(shape=(3, nsrc), dtype=np.float64)
gauss_shape[:] = rf(size=gauss_shape.shape)
return c, s, a, r, gauss_shape
npsrc, ngsrc = 10, 10
pt_lm, pt_stokes, pt_alpha, pt_ref_freq = get_point_sources(npsrc)
assert pt_lm.shape == (npsrc, 2), pt_lm.shape
assert pt_stokes.shape == (npsrc, 4), pt_stokes.shape
assert pt_alpha.shape == (npsrc,), pt_alpha.shape
assert pt_ref_freq.shape == (npsrc,), pt_ref_freq.shape
g_lm, g_stokes, g_alpha, g_ref_freq, g_shape = get_gaussian_sources(ngsrc)
#=========================================
# Create Tigger ASCII sky model
#=========================================
from Tigger.Models.Formats.AIPSCCFITS import lm_to_radec
# Need the phase centre for lm_to_radec
with pt.table(msfile + '::FIELD', ack=False, readonly=True) as F:
ra0, dec0 = F.getcol('PHASE_DIR')[0][0]
# Create the tigger sky model
with open(tigger_sky_file, 'w') as f:
f.write('#format: ra_d dec_d i q u v spi freq0 emaj_s emin_s pa_d\n')
it = enumerate(zip(pt_lm, pt_stokes, pt_alpha, pt_ref_freq))
for i, ((l, m), (I, Q, U, V), alpha, ref_freq) in it:
ra, dec = lm_to_radec(l, m, ra0, dec0)
l, m = np.rad2deg([ra,dec])
f.write('{l:.20f} {m:.20f} {i} {q} {u} {v} {spi} {rf:.20f}\n'.format(
l=l, m=m, i=I, q=Q, u=U, v=V, spi=alpha, rf=ref_freq))
it = enumerate(zip(g_lm, g_stokes, g_alpha, g_ref_freq, g_shape.T))
for i, ((l, m), (I, Q, U, V), alpha, ref_freq, (emaj, emin, pa)) in it:
ra, dec = lm_to_radec(l, m, ra0, dec0)
l, m = np.rad2deg([ra,dec])
# Convert to seconds
emaj, emin = np.asarray([emaj, emin])*648000./np.pi
# Convert to degrees
pa *= 180.0/np.pi
f.write('{l:.20f} {m:.20f} {i} {q} {u} {v} {spi} {rf:.20f} '
'{emaj} {emin} {pa}\n'.format(
l=l, m=m, i=I, q=Q, u=U, v=V, spi=alpha, rf=ref_freq,
emaj=emaj, emin=emin, pa=pa))
#=========================================
# Call MeqTrees
#=========================================
#=========================================
# Call MeqTrees
#=========================================
cmd_list = ['python',
# Meqtree Pipeline script
meqpipe_actual,
# Configuration File
'-c', cfg_file,
# Configuration section
'[{section}]'.format(section=cfg_section),
# Enable the beam?
'me.e_enable = {e}'.format(e=beam_on),
# Measurement Set
'ms_sel.msname={ms}'.format(ms=msfile),
# Tigger sky file
'tiggerlsm.filename={sm}'.format(sm=tigger_sky_file),
# Output column
'ms_sel.output_column={c}'.format(c=meq_vis_column),
# Imaging Column
'img_sel.imaging_column={c}'.format(c=meq_vis_column),
# Beam FITS file pattern
'pybeams_fits.filename_pattern={p}'.format(p=beam_file_schema),
# FITS L and M AXIS
'pybeams_fits.l_axis={l}'.format(l=l_axis),
'pybeams_fits.m_axis={m}'.format(m=m_axis),
sim_script,
'=simulate'
]
import montblanc
from montblanc.impl.rime.tensorflow.ms import MeasurementSetManager
from montblanc.impl.rime.tensorflow.sources import (SourceProvider,
MSSourceProvider,
FitsBeamSourceProvider,
CachedSourceProvider)
from montblanc.impl.rime.tensorflow.sinks import MSSinkProvider
class RadioSourceProvider(SourceProvider):
def name(self):
return "RadioSourceProvider"
def point_lm(self, context):
lp, up = context.dim_extents('npsrc')
return pt_lm[lp:up, :]
def point_stokes(self, context):
(lp, up), (lt, ut), (lc, uc) = context.dim_extents('npsrc', 'ntime', 'nchan')
# (npsrc, ntime, nchan, 4)
s = pt_stokes[lp:up,None,None,:]
a = np.broadcast_to(pt_alpha[lp:up,None,None,None], (up-lp,ut-lt,1,1))
rf = pt_ref_freq[lp:up,None,None,None]
f = frequency[None,None,lc:uc,None]
return s*(f/rf)**a
def gaussian_lm(self, context):
lg, ug = context.dim_extents('ngsrc')
return g_lm[lg:ug, :]
def gaussian_stokes(self, context):
(lg, ug), (lt, ut), (lc, uc) = context.dim_extents('ngsrc', 'ntime', 'nchan')
# (ngsrc, ntime, nchan, 4)
s = g_stokes[lg:ug,None,None,:]
a = np.broadcast_to(g_alpha[lg:ug,None,None,None], (ug-lg,ut-lt,1,1))
rf = g_ref_freq[lg:ug,None,None,None]
f = frequency[None,None,lc:uc,None]
return s*(f/rf)**a
def gaussian_shape(self, context):
(lg, ug) = context.dim_extents('ngsrc')
gauss_shape = g_shape[:,lg:ug]
emaj = gauss_shape[0]
emin = gauss_shape[1]
pa = gauss_shape[2]
gauss = np.empty(context.shape, dtype=context.dtype)
gauss[0,:] = emaj * np.sin(pa)
gauss[1,:] = emaj * np.cos(pa)
emaj[emaj == 0.0] = 1.0
gauss[2,:] = emin / emaj
return gauss
def updated_dimensions(self):
return [('npsrc', pt_lm.shape[0]), ('ngsrc', g_lm.shape[0])]
slvr_cfg = montblanc.rime_solver_cfg(
mem_budget=1024*1024*1024,
data_source='default',
dtype='double' if dtype == np.float64 else 'float',
polarisation_type=pol_type,
auto_correlations=False,
version='tf')
slvr = montblanc.rime_solver(slvr_cfg)
ms_mgr = MeasurementSetManager(msfile, slvr_cfg)
source_providers = []
source_providers.append(MSSourceProvider(ms_mgr))
if beam_on == 1:
beam_prov = FitsBeamSourceProvider(beam_file_schema,
l_axis=l_axis, m_axis=m_axis)
source_providers.append(beam_prov)
source_providers.append(RadioSourceProvider())
cache_prov = CachedSourceProvider(source_providers)
source_providers = [cache_prov]
sink_providers = [MSSinkProvider(ms_mgr, mb_vis_column)]
slvr.solve(source_providers=source_providers,
sink_providers=sink_providers)
import time
time.sleep(1)
for obj in source_providers + sink_providers + [ms_mgr]:
obj.close()
# Call the meqtrees simulation script, dumping visibilities into MODEL_DATA
subprocess.call(cmd_list)
# Compare MeqTree and Montblanc visibilities
with pt.table(msfile, ack=False, readonly=True) as MS:
ntime, nbl, nchan = slvr.hypercube.dim_global_size('ntime', 'nbl', 'nchan')
shape = (ntime, nbl, nchan, 4)
meq_vis = MS.getcol(meq_vis_column).reshape(shape)
mb_vis = MS.getcol(mb_vis_column).reshape(shape)
# Compare
close = np.isclose(meq_vis, mb_vis)
not_close = np.invert(close)
problems = np.nonzero(not_close)
# Everything agrees, exit
if problems[0].size == 0:
print('Montblanc and MeqTree visibilities agree')
sys.exit(0)
bad_vis_file = 'bad_visibilities.txt'
# Some visibilities differ, do some analysis
print((("Montblanc differs from MeqTrees by {nc}/{t} visibilities. "
"Writing them out to '{bvf}'").format(
nc=problems[0].size, t=not_close.size, bvf=bad_vis_file)))
abs_diff = np.abs(meq_vis - mb_vis)
rmsd = np.sqrt(np.sum(abs_diff**2)/abs_diff.size)
nrmsd = rmsd / (np.max(abs_diff) - np.min(abs_diff))
print(('RMSD {rmsd} NRMSD {nrmsd}'.format(rmsd=rmsd, nrmsd=nrmsd)))
# Plot a histogram of the difference
try:
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
except:
print(("Exception importing matplotlib %s" % sys.exc_info()[2]))
else:
try:
nr_of_bins = 100
n, bins, patches = plt.hist(abs_diff.flatten(),
bins=np.logspace(np.log10(1e-10), np.log10(1.0), nr_of_bins))
plt.gca().set_xscale("log")
plt.xlabel('Magnitude Difference')
plt.ylabel('Counts')
plt.grid(True)
plt.savefig('histogram.pdf')
except:
print(("Error plotting histogram %s" % sys.exc_info()[2]))
mb_problems = mb_vis[problems]
meq_problems = meq_vis[problems]
difference = mb_problems - meq_problems
amplitude = np.abs(difference)
# Create an iterator over the first 100 problematic visibilities
t = (np.asarray(problems).T, mb_problems, meq_problems, difference, amplitude)
it = enumerate(zip(*t))
it = itertools.islice(it, 0, 1000, 1)
# Write out the problematic visibilities to file
with open(bad_vis_file, 'w') as f:
for i, (p, mb, meq, d, amp) in it:
f.write("{i} {t} Montblanc: {mb} MeqTrees: {meq} "
"Difference {d} Absolute Difference {ad} \n".format(
i=i, t=p, mb=mb, meq=meq, d=d, ad=amp))
if __name__ == "__main__":
import argparse
from os.path import join as pjoin
from montblanc.util import parse_python_assigns
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("ms", default=pjoin("data", "WSRT.MS"),
nargs="?")
p.add_argument("-p", "--polarisation-type",
choices=['linear', 'circular'],
default='linear')
p.add_argument("-a", "--args", type=parse_python_assigns,
default="",
help="semi-colon separated list of "
"python variable assignments. "
"These variable assignments are "
"passed into the run_tests function."
"See its docstring for details.")
return p
args = create_parser().parse_args()
run_test(args.ms, args.polarisation_type, **args.args)
| gpl-2.0 |
alexeyum/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
rajnikant1010/EVAutomation | focus_controller_ws/src/focus_control/src/kalman.py | 1 | 5407 | #!/usr/bin/env python
# license removed for brevity
import rospy
#from std_msgs.msg import String
import std_msgs.msg as msg
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import math
from swiftnav_piksi.msg import loc
from swiftnav_piksi.msg import fil
from nav_msgs.msg import Odometry
#Initialize Global Variables
data_received = False
no_gps_data = True
gps_t = 0
gps_t_last = -0.1
gps_x = 0
gps_y = 0
gps_z = 0
time_started = False
start_time = 0
xcov = 1000;
ycov = 1000;
vxcov = 1000;
vycov = 1000;
def callback(data):
global gps_t, gps_x, gps_y, gps_z, data_received,no_gps_data,gps_vx,gps_vy,time_started,start_time,xcov,ycov,vxcov,vycov
no_gps_data = False
if time_started == False:
start_time = data.header.stamp.secs + (data.header.stamp.nsecs*(10**(-9)));
time_started = True
gps_t = data.header.stamp.secs + (data.header.stamp.nsecs*(10**(-9))) - start_time;
gps_x = data.pose.pose.position.x;
gps_y = data.pose.pose.position.y;
gps_z = data.pose.pose.position.z;
gps_vx = data.twist.twist.linear.x;
gps_vy = data.twist.twist.linear.y;
xcov = data.pose.covariance[0];
ycov = data.pose.covariance[7];
vxcov = data.pose.covariance[21];
vycov = data.pose.covariance[28];
data_received = True
def kalman():
#Initialize ROS Nodes
#pub = rospy.Publisher('filter_output', fil, queue_size=10)
pub = rospy.Publisher('filter_output', Odometry, queue_size=10)
rospy.init_node('kalman', anonymous=True)
rospy.Subscriber("/gps/rtkfix", Odometry, callback)
#Initialize Variables
global gps_t, gps_x, gps_y, gps_z, gps_t_last,data_received,no_gps_data,xcov,ycov,vxcov,vycov
#filter_output = fil()
filter_output = Odometry()
t = rospy.Time.now()
sec = t.secs
nsec = t.nsecs
current_time_s = sec + nsec*(10**-9)
#current_time_s = rospy.get_time()
#current_time_s = current_time.to_sec
last_time_s = current_time_s
vxdata_last = 0
vydata_last = 0
#Filter Initialization
Pfilter = np.array([(1**2,0,0,0),(0,1**2,0,0),(0,0,5**2,0),(0,0,0,5**2)])
Q = 10*np.array([(1**2,0,0,0),(0,1**2,0,0),(0,0,1**2,0),(0,0,0,1**2)])
#R = 0.03**2*np.array([(1,0),(0,1)])
R = np.array([(xcov,0,0,0),(0,ycov,0,0),(0,0,vxcov,0),(0,0,0,vycov)])
F = np.array([(0,0,1,0),(0,0,0,1),(0,0,0,0),(0,0,0,0)])
H = np.array([(1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)])
M = 10
pi = math.pi
#Open Output File
#f = open('/home/acostley/Desktop/ece6320/corrupted_data/kalman_out_cor','w')
rate = rospy.Rate(100) # 100hz (10 times faster than GPS)
while no_gps_data and not rospy.is_shutdown():
#rospy.loginfo("No GPS Data Received")
rate.sleep()
rospy.loginfo("GPS Data Received")
#Xdata = np.array([(0),(0),(0),(0)])
#Xfilter = np.array([(gps_x),(gps_y),(0.1),(0.1)]) #At t=0
Xfilter = np.array([(gps_x),(gps_y),(gps_vx),(gps_vy)]) #At t=0
Xdata = Xfilter
vxdata_last = Xdata[2]
vydata_last = Xdata[3]
while not rospy.is_shutdown():
R = np.array([(xcov,0,0,0),(0,ycov,0,0),(0,0,vxcov,0),(0,0,0,vycov)])
#dt = gps_t - gps_t_last
dt = 0.1
#current_time = rospy.get_time()
#current_time_s = current_time.to_sec
t = rospy.Time.now()
sec = t.secs
nsec = t.nsecs
current_time_s = sec + nsec*(10**-9)
#dt = current_time_s - last_time_s
ax = (Xfilter[2] - vxdata_last)/dt
ay = (Xfilter[3] - vydata_last)/dt
#Prediction
#xdot = np.array([(Xfilter[2]),(Xfilter[3]),(10),(5)])
xdot = np.array([(Xfilter[2]),(Xfilter[3]),(0),(0)])
Xfilter = Xfilter + (dt/M)*xdot
Pfilter = Pfilter + (dt/M)*(F.dot(Pfilter)+Pfilter.dot(F.transpose())+Q)
#Measurement
if data_received == True:
data_received = False
tmp = np.linalg.inv(R+H.dot(Pfilter).dot(H.transpose()))
K = Pfilter.dot(H.transpose()).dot(tmp)
states = np.array([(gps_x),(gps_y),(gps_vx),(gps_vy)]);
#Xfilter_xy = np.array([(Xfilter[0]),(Xfilter[1])])
#Xfilter = Xfilter - K.dot(Xfilter_xy - xy)
Xfilter = Xfilter - K.dot(Xfilter - states)
Pfilter = (np.identity(4) - K.dot(H)).dot(Pfilter)
Xdata = Xfilter
xf = Xdata[0]
yf = Xdata[1]
vxf = Xdata[2]
vyf = Xdata[3]
vel = math.sqrt(vxf**2 + vyf**2)*2.23694
psi = math.atan2(vyf,vxf)*180/pi
#filter_output.t = gps_t
#filter_output.x = gps_x
#filter_output.y = gps_y
#filter_output.vx = gps_vx
#filter_output.vy = gps_vy
#filter_output.xf = xf
#filter_output.yf = yf
#filter_output.vxf = vxf
#filter_output.vyf = vyf
#filter_output.vel = vel
#filter_output.psi = psi
filter_output.header.stamp = rospy.Time.from_sec(gps_t)
filter_output.pose.pose.position.x = xf
filter_output.pose.pose.position.y = yf
filter_output.twist.twist.linear.x = vxf
filter_output.twist.twist.linear.y = vyf
#f.write(repr(gps_t)+','+repr(gps_x)+','+repr(gps_y)+','+repr(gps_vx)+','+repr(gps_vy)+','+repr(xf)+','+repr(yf)+','+repr(vxf)+','+repr(vyf)+','+repr(vel)+','+repr(psi)+','+repr(dt)+','+repr(current_time_s)+','+repr(last_time_s)+'\n')
gps_t_last = gps_t
last_time_s = current_time_s
vxdata_last = vxf
vydata_last = vyf
#rospy.loginfo(filter_output)
#rospy.loginfo(xcov);
#rospy.loginfo(ycov);
#rospy.loginfo(vxcov);
#rospy.loginfo(vycov);
pub.publish(filter_output)
rate.sleep()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
#f.close()
if __name__ == '__main__':
try:
kalman()
except rospy.ROSInterruptException:
pass
| bsd-2-clause |
untom/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
hlin117/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
flybird119/voltdb | tools/vis.py | 13 | 10217 | #!/usr/bin/env python
# This is a visualizer which pulls benchmark results from the stats database
# and visualizes them. Four graphs will be generated: latency graphs on single
# and multiple nodes, and throughput graphs on single and multiple nodes.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
def get_branches(hostname, port, days):
mydate = datetime.datetime.today()-datetime.timedelta(days=days)
query = "select branch, count(*) from app_stats where date >= '%s' group by branch order by 1 asc" % \
mydate.strftime('%Y-%m-%d 00:00:00')
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, '@AdHoc',
[FastSerializer.VOLTTYPE_STRING])
resp = proc.call([query])
conn.close()
branches = []
for row in resp.tables[0].tuples:
branches.append(str(row[0]))
return branches
def get_min_date(hostname, port):
query = "select min(date) from app_stats"
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, '@AdHoc',
[FastSerializer.VOLTTYPE_STRING])
resp = proc.call([query])
conn.close()
ndays = datetime.datetime.today()-resp.tables[0].tuples[0][0]
return ndays.days+1
def get_stats(hostname, port, days, branch):
"""Get statistics of all runs
Example return value:
{ u'VoltKV': [ { 'lat95': 21,
'lat99': 35,
'nodes': 1,
'throughput': 104805,
'date': datetime object}],
u'Voter': [ { 'lat95': 20,
'lat99': 47,
'nodes': 1,
'throughput': 66287,
'date': datetime object}]}
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT,
FastSerializer.VOLTTYPE_STRING])
resp = proc.call([days, branch])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
app_stats = []
if row[0] not in stats:
stats[row[0]] = app_stats
else:
app_stats = stats[row[0]]
run_stats = dict(zip(run_stat_keys, row[1:]))
app_stats.append(run_stats)
# sort each one
for app_stats in stats.itervalues():
app_stats.sort(key=lambda x: x['date'])
return stats
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h, ndays):
self.filename = filename
self.ndays = ndays
self.legends = {}
w = w == None and 1200 or w
h = h == None and 400 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
fig.autofmt_xdate()
def plot(self, x, y, color, marker_shape, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend),
marker=marker_shape, markerfacecolor=color, markersize=4)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d %y")
self.ax.xaxis.set_major_formatter(formatter)
ymin, ymax = plt.ylim()
plt.ylim((ymin-(ymax-ymin)*0.1, ymax+(ymax-ymin)*0.1))
xmax = datetime.datetime.today().toordinal()
plt.xlim((xmax-self.ndays, xmax))
plt.legend(prop={'size': 10}, loc=2)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
plt.close('all')
def plot(title, xlabel, ylabel, filename, width, height, app, data, data_type, ndays):
plot_data = dict()
for run in data:
if run['nodes'] not in plot_data:
plot_data[run['nodes']] = {'time': [], data_type: []}
datenum = matplotlib.dates.date2num(run['date'])
plot_data[run['nodes']]['time'].append(datenum)
if data_type == 'tps':
value = run['tps']/run['nodes']
else:
value = run[data_type]
plot_data[run['nodes']][data_type].append(value)
if len(plot_data) == 0:
return
i = 0
pl = Plot(title, xlabel, ylabel, filename, width, height, ndays)
sorted_data = sorted(plot_data.items(), key=lambda x: x[0])
for k, v in sorted_data:
pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k)
i += 3
for k, v in sorted_data:
x = v['time'][-1]
y = v[data_type][-1]
pl.ax.annotate(str(y), xy=(x,y), xycoords='data', xytext=(5,-5),
textcoords='offset points', ha='left')
xmin, ymin = [(v['time'][i],y) for i,y in enumerate(v[data_type]) if y == min(v[data_type])][-1]
xmax, ymax= [(v['time'][i],y) for i,y in enumerate(v[data_type]) if y == max(v[data_type])][-1]
if ymax != ymin:
if xmax != x:
pl.ax.annotate(str(ymax), xy=(xmax,ymax),
textcoords='offset points', ha='center', va='bottom', xytext=(0,5))
if xmin != x:
pl.ax.annotate(str(ymin), xy=(xmin,ymin),
textcoords='offset points', ha='center', va='top', xytext=(0,-5))
pl.close()
def generate_index_file(filenames):
row = """
<tr>
<td width="100">%s</td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
</tr>
"""
sep = """
</table>
<table frame="box">
<tr>
<th colspan="4"><a name="%s">%s</a></th>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
<table frame="box">
%s
</table>
</body>
</html>
"""
hrow = """
<tr>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
</tr>
"""
toc = sorted(list(set([x[0] for x in filenames])))
h = map(lambda x:(x.replace(' ','%20'), x), toc)
n = 4
z = n-len(h)%n
while z > 0 and z < n:
h.append(('',''))
z -= 1
rows = []
t = ()
for i in range(1, len(h)+1):
t += tuple(h[i-1])
if i%n == 0:
rows.append(hrow % t)
t = ()
last_app = None
for i in filenames:
if i[0] != last_app:
rows.append(sep % (i[0], i[0]))
last_app = i[0]
rows.append(row % (i[4], i[1], i[1], i[2], i[2], i[3], i[3]))
return full_content % ''.join(rows)
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base [ndays]" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
if len(sys.argv) >=4:
ndays = int(sys.argv[3])
else:
ndays = get_min_date(STATS_SERVER, 21212)
width = None
height = None
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
# show all the history
branches = get_branches(STATS_SERVER, 21212, ndays)
branches.sort()
i=0
for p in ['master', 'release-']:
for b in branches:
if b.startswith(p):
x=branches.pop(branches.index(b))
branches.insert(i, x)
i+=1
root_path = path
filenames = [] # (appname, latency95, latency99, throughput, branch, order)
iorder = 0
for branch in branches:
iorder += 1
stats = get_stats(STATS_SERVER, 21212, ndays, branch)
prefix = sys.argv[2] + "-" + branch
path = root_path + "-" + branch
# Plot single node stats for all apps
for app, data in stats.iteritems():
app_filename = app.replace(' ', '_')
latency95_filename = '%s-latency95-%s.png' % (prefix, app_filename)
latency99_filename = '%s-latency99-%s.png' % (prefix, app_filename)
throughput_filename = '%s-throughput-%s.png' % (prefix, app_filename)
filenames.append((app, latency95_filename, latency99_filename, throughput_filename, branch, iorder))
plot(app + " latency95 on " + branch, "Time", "Latency (ms)",
path + "-latency95-" + app_filename + ".png", width, height, app,
data, 'lat95', ndays)
plot(app + " latency99 on " + branch, "Time", "Latency (ms)",
path + "-latency99-" + app_filename + ".png", width, height, app,
data, 'lat99', ndays)
plot(app + " throughput(best) on " + branch, "Time", "Throughput (txns/sec)",
path + "-throughput-" + app_filename + ".png", width, height, app,
data, 'tps', ndays)
# generate index file
index_file = open(root_path + '-index.html', 'w')
sorted_filenames = sorted(filenames, key=lambda f: f[0].lower()+str(f[5]))
index_file.write(generate_index_file(sorted_filenames))
index_file.close()
if __name__ == "__main__":
main()
| agpl-3.0 |
zhangmianhongni/MyPractice | Python/MachineLearning/ud120-projects-master/choose_your_own/your_algorithm.py | 7 | 1400 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
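### example choice only: the exercise leaves the classifier open, so a
### k-nearest-neighbours model is used here purely for illustration; any
### sklearn classifier with fit/score would work in its place
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(features_train, labels_train)
accuracy = clf.score(features_test, labels_test)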
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| apache-2.0 |
wwf5067/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
xubenben/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
jmargeta/scikit-learn | sklearn/cluster/__init__.py | 7 | 1331 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import mean_shift, MeanShift, estimate_bandwidth, \
get_bin_seeds
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import ward_tree, Ward, WardAgglomeration
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from ..utils import deprecated
# backward compatibility
@deprecated("to be removed in 0.15;"
" use sklearn.manifold.spectral_embedding instead")
def spectral_embedding(*args, **kwargs):
"""Deprecated, use ``sklearn.manifold.spectral_embedding`` instead"""
from ..manifold.spectral_embedding import spectral_embedding
return spectral_embedding(*args, **kwargs)
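# Illustrative replacement call only; `adjacency` stands for any symmetric
# affinity matrix and is a hypothetical name:
#   from sklearn.manifold import spectral_embedding
#   embedding = spectral_embedding(adjacency, n_components=2)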
__all__ = ['AffinityPropagation',
'DBSCAN',
'KMeans',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'Ward',
'WardAgglomeration',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'mean_shift',
'spectral_clustering',
'spectral_embedding',
'ward_tree']
| bsd-3-clause |
NUAAXXY/globOpt | evaluation/readGraphProperties.py | 2 | 2823 | import packages.project as project
import packages.primitive as primitive
import packages.processing as processing
import packages.relationGraph as relgraph
import packages.io
import packages.utils as utils
import packages.colours as colours
import packages.orderedSet as orderedSet
import argparse
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import isomorphism
import numpy as np
try:
from networkx import graphviz_layout
layout=nx.graphviz_layout
except ImportError:
print("PyGraphviz not found; drawing with spring layout; will be slow.")
layout=nx.spring_layout
################################################################################
## UI Generation
def setupGraphUI(graph, primitives, title):
fig, ax1 = plt.subplots()
fig.canvas.set_window_title(title)
lay = layout(graph.G)
CmapObject = colours.Colours()
cmap, gfilter = CmapObject.getDIDColourMap(primitives)
nx.draw_networkx_edges(graph.G, lay)
for did, colour in cmap.iteritems():
nx.draw_networkx_nodes(graph.G, lay, node_size=800, nodelist=gfilter[did], node_color=colour)
nx.draw_networkx_labels(graph.G, lay)
tolerance = 0.1
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Simply read a graph and output its statistics.')
parser.add_argument('primitives')
parser.add_argument('point_primitives')
parser.add_argument('cloud')
parser.add_argument('--angles', nargs='*')
parser.add_argument('--iteration', default=" unknown")
args = parser.parse_args()
projectdir = args.primitives
angles = utils.parseAngles(args.angles)
itId = args.iteration
linesfile = args.primitives
assignfile = args.point_primitives
cloud = packages.io.readPointCloudFromPly(args.cloud)
################################################################################
## Reading input files
lines = primitive.readPrimitivesFromFile(linesfile)
assign = packages.io.readPointAssignementFromFiles(assignfile)
#cleanlines = processing.removeUnassignedPrimitives(lines, assign)
################################################################################
## Build and display relation graphs
graph = relgraph.RelationGraph(lines, assign, angles, tolerance)
print "Number of points: ", len(cloud)
print "Number of primitives : ",graph.G.number_of_nodes()
print "Number of metanodes: ", graph.getNumberOfMetanodes()
print "Number of N to N rels: ", graph.getNumberOfNodeToNodeRelations()
#print "Number of connections: ",graph.G.number_of_edges()
#print "Max nb of connections: ",graph.G.number_of_nodes()*graph.G.number_of_nodes()
exit()
setupGraphUI(graph, lines, "Iteration "+itId)
plt.savefig("relationGraphs_it"+itId+".svg")
| apache-2.0 |
davidsamu/seal | seal/analysis/stats.py | 1 | 6036 | # -*- coding: utf-8 -*-
"""
Functions to test statistical difference between samples and across
time-series.
@author: David Samu
"""
import numpy as np
import scipy as sp
import pandas as pd
# Constants.
min_sample_size = 10
# %% Basic statistical tests.
def sem(x):
"""Calculates the standard error of the mean of vector x."""
s = sp.stats.sem(x)
return s
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and
the p-value for testing non-correlation.
"""
r, p = sp.stats.pearsonr(x, y)
return r, p
def t_test(x, y, paired=False, equal_var=False, nan_policy='propagate'):
"""
Run t-test between two related (paired) or independent (unpaired) samples.
"""
# Remove any NaN values.
if paired:
idx = np.logical_and(~np.isnan(x), ~np.isnan(y))
x, y = x[idx], y[idx]
# Insufficient sample size.
xvalid, yvalid = [v[~np.isnan(v)] for v in (x, y)]
if min(len(xvalid), len(yvalid)) < min_sample_size:
return np.nan, np.nan
if paired:
stat, pval = sp.stats.ttest_rel(x, y, nan_policy=nan_policy)
else:
stat, pval = sp.stats.ttest_ind(xvalid, yvalid, equal_var=equal_var)
return stat, pval
def wilcoxon_test(x, y, zero_method='wilcox', correction=False):
"""
Run Wilcoxon test, testing the null-hypothesis that
two related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x-y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Note: Because the normal approximation is used for the calculation,
the samples used should be large. A typical rule is to require
that n > 20.
"""
# Remove any NaN values. Test is always paired!
idx = np.logical_and(~np.isnan(x), ~np.isnan(y))
x, y = x[idx], y[idx]
# Insufficient sample size.
if min(len(x), len(y)) < min_sample_size:
return np.nan, np.nan
stat, pval = sp.stats.wilcoxon(x, y, zero_method=zero_method,
correction=correction)
return stat, pval
def mann_whithney_u_test(x, y, use_continuity=True, alternative='two-sided'):
"""Run Mann-Whitney (aka unpaired Wilcoxon) rank test on samples."""
# Insufficient sample size.
xvalid, yvalid = [v[~np.isnan(v)] for v in (x, y)]
if min(len(xvalid), len(yvalid)) < min_sample_size:
return np.nan, np.nan
# At least one item should differ from rest
xv_un, yv_un = np.unique(xvalid), np.unique(yvalid)
if len(xv_un) == 1 and len(yv_un) == 1 and np.array_equal(xv_un, yv_un):
return np.nan, np.nan
stat, pval = sp.stats.mannwhitneyu(xvalid, yvalid,
use_continuity, alternative)
return stat, pval
# %% Meta-functions testing statistical differences on time series.
def sign_diff(ts1, ts2, p, test, **kwargs):
"""
Return times of significant difference between two sets of time series.
ts1, ts2: time series stored in DataFrames, columns are time samples.
"""
# Get intersection of time vector.
tvec = np.intersect1d(ts1.columns, ts2.columns)
# Select test.
if test == 't-test':
test_func = t_test
elif test == 'wilcoxon':
test_func = wilcoxon_test
elif test == 'mann_whitney_u':
test_func = mann_whithney_u_test
else:
print('Unrecognised test name: ' + str(test) + ', running t-test.')
test_func = t_test
# Calculate p-values and times of significant difference.
pvals = pd.Series([test_func(ts1[t], ts2[t], **kwargs)[1]
for t in tvec], index=tvec)
tsign = pvals < p
return pvals, tsign
def periods(t_on_ser, min_len=None):
"""Return list of periods where t_on is True and with minimum length."""
if not len(t_on_ser.index):
return []
# Init data.
tvec = np.array(t_on_ser.index)
t_on = np.array(t_on_ser)
# Starts of periods.
tstarts = np.insert(t_on, 0, False)
istarts = np.logical_and(tstarts[:-1] == False, tstarts[1:] == True)
# Ends of periods.
tends = np.append(t_on, False)
iends = np.logical_and(tends[:-1] == True, tends[1:] == False)
# Zip (start, end) pairs of periods.
pers = [(t1, t2) for t1, t2 in zip(tvec[istarts], tvec[iends])]
# Drop periods shorter than minimum length.
if min_len is not None:
pers = [(t1, t2) for t1, t2 in pers if t2-t1 >= min_len]
return pers
def sign_periods(ts1, ts2, pval, test, min_len=None, **kwargs):
"""
Return list of periods of significantly difference
between two sets of time series (row: samples, columns: time points).
"""
# Indices of significant difference.
tsign = sign_diff(ts1, ts2, pval, test, **kwargs)[1]
# Periods of significant difference.
sign_periods = periods(tsign, min_len)
return sign_periods
def prd_in_window(t, tmin, tmax, tlen, tdim=None):
"""
Return limits of time period of given length centered around t
within window.
"""
# Init.
tlen = float(tlen)
half_len = tlen/2
# Extend of overflow of specified time window on each side.
left_overflow = max(tmin - (t - half_len), 0)
right_overflow = max((t + half_len) - tmax, 0)
# End points.
tstart = max(t - half_len - right_overflow, tmin)
tend = min(t + half_len + left_overflow, tmax)
# Add temporal dimension (ms, s, etc).
if tdim is not None:
tstart = tstart * tdim
tend = tend * tdim
TW = pd.Series([tstart, tend], index=['tstart', 'tend'])
return TW
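# Illustrative sketch, not part of the original module: a worked example of the
# clamping behaviour. A window of length 4 centred on t=1 would start below
# tmin=0, so it is shifted to [0, 4] rather than [-1, 3].
def _prd_in_window_demo():
    tw = prd_in_window(t=1, tmin=0, tmax=10, tlen=4)
    # tw['tstart'] == 0.0 and tw['tend'] == 4.0
    return tw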
def perm_pval(score, perm_scores):
"""
    Calculate p-value of original score against a vector of permuted scores.
Based on Def 1 of Ojala and Garriga. Permutation Tests for Studying
Classifier Performance. The Journal of Machine Learning Research (2010)
"""
pval = (np.sum(perm_scores >= score)+1) / (len(perm_scores)+1)
return pval
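# Illustrative usage sketch, not part of the original module: how perm_pval
# could be used to check whether an observed classifier score beats chance,
# by comparing it against scores obtained on label-permuted data
# (Ojala & Garriga, 2010). The score values below are arbitrary.
def _perm_pval_demo():
    rng = np.random.RandomState(0)
    observed_score = 0.83                      # score on the true labels (assumed)
    perm_scores = rng.uniform(0.4, 0.6, 1000)  # scores after permuting labels (assumed)
    return perm_pval(observed_score, perm_scores)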
| gpl-3.0 |
BaileyGlen/genedb | genedb/genedb.py | 1 | 1256 | # -*- coding: utf-8 -*-
"""Main module."""
import pandas as pd
import csv
from genedb import cleanerfunc
# def removespecchar(test):
# import re
# if type(test) == str:
# test2=re.sub('\t','',test)
# test=re.sub('\"','',test2)
# return(test)
"""String Cleaning Function."""
"""Function to create bed file.
Changes an Excel file into separate bed files."""
def createbed(dfgenes, disease, sheetindex):
df=pd.read_excel("data/MyeloidGeneKB01.xlsx", sheetname=sheetindex, index_col="GeneSymbol", parse_cols = [0,1,2,3,4])
df.columns= disease +"_"+ df.columns
dfgenes2 = dfgenes.join(df, how="outer")
dfgenes2.reset_index(inplace=True)
dfgenes2=dfgenes2.fillna("")
dfgenes2=dfgenes2.applymap(cleanerfunc.removespecchar)
collist=list(dfgenes2.columns)
newcollist=collist[6:9] + collist[0:6] + collist[9:]
dfgenes2.to_csv("data\\" + disease+".bed", sep='\t', columns=newcollist, index=False)
dfgenes = pd.read_excel("data/MyeloidGeneKB01.xlsx", sheetname=0, index_col="GeneSymbol", dtype=object)
myeloidDiseaseList = ["Unknown", "AML", "MDS", "MPN", ]
myeloidSheetList = [1,2,3,4]
for disease, sheetindex in zip (myeloidDiseaseList, myeloidSheetList):
createbed(dfgenes,disease,sheetindex)
| mit |
kjung/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
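# Illustrative sketch, not part of the scikit-learn API: shows how the random
# Fourier feature map produced by RBFSampler approximates the exact RBF kernel.
# Dot products of transformed samples should be close to
# exp(-gamma * ||x - y||^2), with the error shrinking as n_components grows.
# The data and parameter values below are arbitrary.
def _rbf_sampler_demo(n_samples=50, n_features=5, gamma=0.5, n_components=2000):
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    sampler = RBFSampler(gamma=gamma, n_components=n_components, random_state=0)
    X_features = sampler.fit_transform(X)
    # Exact kernel computed directly from pairwise squared distances.
    sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)
    kernel_exact = np.exp(-gamma * sq_dists)
    kernel_approx = np.dot(X_features, X_features.T)
    return np.mean(np.abs(kernel_exact - kernel_approx))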
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Approximates the feature map by sampling the Fourier transform of the
    kernel characteristic function at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
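# Illustrative sketch, not part of the scikit-learn API: demonstrates the
# output dimensionality of AdditiveChi2Sampler. Each of the n_features
# non-negative input features is mapped to 2*sample_steps-1 output features
# (one zeroth-order term plus a cos/sin pair per remaining step). The input
# values below are arbitrary.
def _additive_chi2_demo(sample_steps=2):
    rng = np.random.RandomState(0)
    X = np.abs(rng.normal(size=(4, 3)))
    transformer = AdditiveChi2Sampler(sample_steps=sample_steps)
    X_new = transformer.fit_transform(X)
    assert X_new.shape == (4, 3 * (2 * sample_steps - 1))
    return X_new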
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
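# Illustrative sketch, not part of the scikit-learn API: the Nystroem embedding
# is constructed so that dot products of embedded samples approximate the full
# kernel matrix; with n_components equal to n_samples the reconstruction is
# essentially exact. The data and parameter values below are arbitrary.
def _nystroem_demo(n_samples=30, n_features=4, gamma=0.2):
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    nys = Nystroem(kernel='rbf', gamma=gamma, n_components=n_samples,
                   random_state=0)
    embedding = nys.fit_transform(X)
    kernel_exact = pairwise_kernels(X, metric='rbf', gamma=gamma)
    kernel_approx = np.dot(embedding, embedding.T)
    return np.max(np.abs(kernel_exact - kernel_approx))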
| bsd-3-clause |
FRidh/python-acoustics | tests/test_imaging.py | 3 | 1189 | import numpy as np
import pytest
has_matplotlib = pytest.importorskip("matplotlib")
if has_matplotlib:
from acoustics.bands import octave, third
from acoustics.imaging import plot_octave, plot_third, plot_bands
def setup_module(imaging):
imaging.octaves = octave(16, 16000)
imaging.thirds = third(63, 8000)
imaging.tl_oct = np.array([3, 4, 5, 12, 15, 24, 28, 23, 35, 45, 55])
imaging.tl_third = np.array([0, 0, 0, 1, 1, 2, 3, 5, 8, 13, 21,
32, 41, 47, 46, 44, 58, 77, 61, 75, 56, 54])
imaging.title = 'Title'
imaging.label = 'Label'
def test_plot_octave():
plot_octave(tl_oct, octaves)
def test_plot_octave_kHz():
plot_octave(tl_oct, octaves, kHz=True, xlabel=label, ylabel=label,
title=title, separator='.')
def test_plot_third_octave():
plot_third(tl_third, thirds, marker='s', separator=',')
def test_plot_third_octave_kHz():
plot_third(tl_third, thirds, marker='s', kHz=True, xlabel=label,
ylabel=label, title=title)
def test_plot_band_oct():
plot_bands(tl_oct, octaves, axes=None, band_type='octave')
def teardown_module(imaging):
pass
| bsd-3-clause |
laurent-george/bokeh | bokeh/compat/mpl.py | 32 | 2834 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
You can store a bokeh plot in a standalone HTML file, as a document in
a Bokeh plot server, or embedded directly into an IPython Notebook
output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
Fully specified URL of bokeh plot server. Default bokeh plot server
        URL is "http://localhost:5006" or simply "default".
notebook: bool (default=False)
Return an output value from this function which represents an HTML
object that the IPython notebook can display. You can also use it with
a bokeh plot server just specifying the URL.
pd_obj: bool (default=True)
        The implementation assumes you are plotting using pandas.
You have the option to turn it off (False) to plot the datetime xaxis
with other non-pandas interfaces.
xkcd: bool (default=False)
If this option is True, then the Bokeh figure will be saved with a
xkcd style.
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
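# Illustrative usage sketch, not part of the original module: a minimal
# conversion of a Matplotlib figure into a Bokeh figure. The plotted data are
# arbitrary; displaying or saving the result is left to bokeh.io.
def _to_bokeh_demo():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [1, 3, 2, 4])
    return to_bokeh(fig)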
| bsd-3-clause |
MarkWieczorek/SHTOOLS | pyshtools/shclasses/shtensor.py | 2 | 176597 | """
Class for the gravity and magnetic field 'gradient' tensors.
"""
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import copy as _copy
from scipy.linalg import eigvalsh as _eigvalsh
import xarray as _xr
from .shgrid import SHGrid as _SHGrid
class Tensor(object):
"""
Generic class for gravity and magnetic field tensors. To initialize the
class, use the method tensor() of an SHGravCoeffs or SHMagCoeffs
class instance.
"""
def __init__(self):
"""Unused constructor of the main class."""
print('Initialize the class using one of the two methods:\n'
'>>> pyshtools.SHGravCoeffs.tensor\n'
'>>> pyshtools.SHMagCoeffs.tensor\n')
def compute_invar(self):
"""
Compute the three invariants (I0, I1, I2) of the tensor, as well as
the quantity I = -(I2/2)**2 / (I1/3)**3.
"""
self.i0 = self.vxx + self.vyy + self.vzz
self.i1 = (self.vxx*self.vyy + self.vyy*self.vzz + self.vxx*self.vzz -
self.vxy**2 - self.vyz**2 - self.vxz**2)
self.i2 = (self.vxx*(self.vyy*self.vzz - self.vyz**2) +
self.vxy*(self.vyz*self.vxz - self.vxy*self.vzz) +
self.vxz*(self.vxy*self.vyz - self.vxz*self.vyy))
self.i = (-1.) * (self.i2 / 2.)**2
self.i.data[1:self.nlat-self.extend, :] /= \
(self.i1.data[1:self.nlat-self.extend, :] / 3.)**3
def compute_eig(self):
"""
        Compute the three eigenvalues of the tensor: eig1, eig2, eig3.
"""
self.eig1 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig2 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig3 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
for i in range(self.nlat):
for j in range(self.nlon):
a = _np.array([[self.vxx.data[i, j],
self.vxy.data[i, j],
self.vxz.data[i, j]],
[self.vyx.data[i, j],
self.vyy.data[i, j],
self.vyz.data[i, j]],
[self.vzx.data[i, j],
self.vzy.data[i, j],
self.vzz.data[i, j]]])
eigs = _eigvalsh(a)
self.eig1.data[i, j] = eigs[2]
self.eig2.data[i, j] = eigs[1]
self.eig3.data[i, j] = eigs[0]
def compute_eigh(self):
"""
Compute the two horizontal eigenvalues of the tensor (eigh1, and
eigh2), as well as the combined maximum absolute value of the two
(eighh).
"""
self.eigh1 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eigh2 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eighh = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
for i in range(self.nlat):
for j in range(self.nlon):
a = _np.array([[self.vxx.data[i, j],
self.vxy.data[i, j]],
[self.vyx.data[i, j],
self.vyy.data[i, j]]])
eigs = _eigvalsh(a)
self.eigh1.data[i, j] = eigs[1]
self.eigh2.data[i, j] = eigs[0]
if abs(eigs[0]) >= abs(eigs[1]):
self.eighh.data[i, j] = eigs[0]
else:
self.eighh.data[i, j] = eigs[1]
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def info(self):
"""
        Print a summary of the data stored in the Tensor class instance.
Usage
-----
x.info()
"""
print(repr(self))
def plot_vxx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vxx component of the tensor.
Usage
-----
x.plot_vxx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxx_label
return self.vxx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vyy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyy component of the tensor.
Usage
-----
x.plot_vyy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vyy_label
return self.vyy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzz component of the tensor.
Usage
-----
x.plot_vzz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vzz_label
return self.vzz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vxy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
        Plot the Vxy component of the tensor.
Usage
-----
x.plot_vxy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxy_label
return self.vxy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vyx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyx component of the tensor.
Usage
-----
x.plot_vyx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vyx_label
return self.vyx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vxz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vxz component of the tensor.
Usage
-----
x.plot_vxz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxz_label
return self.vxz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzx component of the tensor.
Usage
-----
x.plot_vzx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vzx_label
return self.vzx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vyz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyz component of the tensor.
Usage
-----
x.plot_vyz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
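        Examples
        --------
        A minimal, hypothetical sketch; the instance name ``tensor``, the
        tick interval and the output file name are assumptions made only for
        illustration:
        >>> fig, ax = tensor.plot_vyz(cb_tick_interval=20.,
        ...                           show=False, fname='vyz.png')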
"""
if cb_label is None:
cb_label = self._vyz_label
return self.vyz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzy component of the tensor.
Usage
-----
x.plot_vzy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
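        Examples
        --------
        A hypothetical sketch using a global Cartopy projection (``tensor``
        is an assumed instance; any Cartopy projection class could be passed
        instead of Mollweide):
        >>> import cartopy.crs as ccrs
        >>> fig, ax = tensor.plot_vzy(projection=ccrs.Mollweide(),
        ...                           colorbar='bottom', show=False)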
"""
if cb_label is None:
cb_label = self._vzy_label
return self.vzy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot(self, projection=None, tick_interval=[90, 90],
minor_tick_interval=[30, 30], xlabel='', ylabel='',
colorbar='bottom', cmap='viridis', cmap_limits=None,
cmap_reverse=False, cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=8,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the 9 components of the tensor.
Usage
-----
x.plot([projection, tick_interval, minor_tick_interval, ticks, xlabel,
ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [90, 90]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 8
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
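        Examples
        --------
        A minimal, hypothetical sketch that writes the 3x3 panel figure to a
        file (the instance name ``tensor``, the colormap choice and the file
        name are assumptions):
        >>> fig, ax = tensor.plot(colorbar='bottom', cmap='RdBu_r',
        ...                       show=False, fname='tensor_components.png')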
"""
if colorbar is not None:
            if colorbar in ('bottom', 'top'):
scale = 0.9
else:
scale = 0.45
else:
scale = 0.55
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 3, figsize=figsize)
self.plot_vxx(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vxy(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vxz(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyx(projection=projection, ax=ax.flat[3],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyy(projection=projection, ax=ax.flat[4],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyz(projection=projection, ax=ax.flat[5],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzx(projection=projection, ax=ax.flat[6],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzy(projection=projection, ax=ax.flat[7],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzz(projection=projection, ax=ax.flat[8],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
def plot_i0(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
        Plot the first invariant I0 (the trace) of the tensor:
I0 = vxx + vyy + vzz
which should be identically zero.
Usage
-----
x.plot_i0([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = 'Tr $V_{ij}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
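        Examples
        --------
        Because I0 should vanish, a colormap range that is symmetric about
        zero makes residual numerical noise easy to judge. A hypothetical
        sketch (``tensor`` and the chosen limits are assumptions):
        >>> fig, ax = tensor.plot_i0(cmap_limits=[-1.e-12, 1.e-12],
        ...                          show=False)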
"""
if cb_label is None:
cb_label = self._i0_label
if self.i0 is None:
self.compute_invar()
return self.i0.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_i1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
Plot the second invariant I1 of the tensor:
I1 = vxx*vyy + vyy*vzz + vxx*vzz - vxy**2 - vyz**2 - vxz**2
Usage
-----
x.plot_i1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$I_1$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
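        Examples
        --------
        A scalar illustration of the formula above; the component values are
        arbitrary and serve only to show how I1 is assembled:
        >>> vxx, vyy, vzz = 1.0, -0.4, -0.6
        >>> vxy, vyz, vxz = 0.1, 0.2, 0.05
        >>> i1 = vxx*vyy + vyy*vzz + vxx*vzz - vxy**2 - vyz**2 - vxz**2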
"""
if cb_label is None:
cb_label = self._i1_label
if self.i1 is None:
self.compute_invar()
return self.i1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_i2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
Plot the third invariant I2 (the determinant) of the tensor:
I2 = vxx*(vyy*vzz - vyz**2) + vxy*(vyz*vxz - vxy*vzz)
+ vxz*(vxy*vyz - vxz*vyy)
Usage
-----
x.plot_i2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = 'det $V_{ij}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
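        Examples
        --------
        A scalar illustration showing that the expression above is the
        determinant of the symmetric tensor; the component values are
        arbitrary:
        >>> import numpy as np
        >>> vxx, vyy, vzz, vxy, vyz, vxz = 1.0, -0.4, -0.6, 0.1, 0.2, 0.05
        >>> v = np.array([[vxx, vxy, vxz],
        ...               [vxy, vyy, vyz],
        ...               [vxz, vyz, vzz]])
        >>> i2 = np.linalg.det(v)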
"""
if cb_label is None:
cb_label = self._i2_label
if self.i2 is None:
self.compute_invar()
return self.i2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_i(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
        Plot the dimensionless quantity I of Pedersen and Rasmussen (1990):
I = -(I2/2)**2 / (I1/3)**3
that is bounded by 0 and 1.
Usage
-----
x.plot_i([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
        minor_tick_interval : list or tuple, optional, default = [None, None]
            Intervals to use when plotting the minor x and y ticks. If set to
            None, minor ticks will not be plotted.
        ticks : str, optional, default = 'WSen'
            Specify which axes should have ticks drawn and annotated. Capital
            letters plot the ticks and annotations, whereas small letters plot
            only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
            and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
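        Examples
        --------
        A scalar illustration of how I follows from the invariants I1 and I2;
        the numerical values are arbitrary and for illustration only:
        >>> i1, i2 = -0.8125, 0.209
        >>> i = -(i2 / 2.)**2 / (i1 / 3.)**3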
"""
if cb_label is None:
cb_label = self._i_label
if self.i is None:
self.compute_invar()
return self.i.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_invar(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the three invariants of the tensor and the derived quantity I.
Usage
-----
x.plot_invar([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, axes_labelsize, tick_labelsize, ax, show,
fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
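        Examples
        --------
        A minimal, hypothetical sketch saving the 2x2 panel figure of the
        invariants to a file (``tensor`` and the file name are assumptions):
        >>> fig, ax = tensor.plot_invar(show=False, fname='invariants.png')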
"""
if colorbar is not None:
            if colorbar in ('bottom', 'top'):
scale = 0.8
else:
scale = 0.5
else:
scale = 0.6
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(2, 2, figsize=figsize)
self.plot_i0(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i1(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i2(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i(projection=projection, ax=ax.flat[3],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
def plot_eig1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the first eigenvalue of the tensor.
Usage
-----
x.plot_eig1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
        cb_label : str, optional, default = '$\\lambda_1$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
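        Examples
        --------
        A scalar illustration of the eigenvalues of a symmetric 3x3 tensor,
        using arbitrary component values; the ordering convention used by
        this class for eig1, eig2 and eig3 is not implied here:
        >>> import numpy as np
        >>> v = np.array([[1.0, 0.1, 0.05],
        ...               [0.1, -0.4, 0.2],
        ...               [0.05, 0.2, -0.6]])
        >>> lams = np.linalg.eigvalsh(v)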
"""
if cb_label is None:
cb_label = self._eig1_label
if self.eig1 is None:
self.compute_eig()
return self.eig1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eig2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the second eigenvalue of the tensor.
Usage
-----
x.plot_eig2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
        cb_label : str, optional, default = '$\\lambda_2$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eig2_label
if self.eig1 is None:
self.compute_eig()
return self.eig2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eig3(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the third eigenvalue of the tensor.
Usage
-----
x.plot_eig3([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
        cb_label : str, optional, default = '$\\lambda_3$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eig3_label
if self.eig1 is None:
self.compute_eig()
return self.eig3.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigs(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the three eigenvalues of the tensor.
Usage
-----
x.plot_eigs([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 2.3
else:
scale = 1.4
else:
scale = 1.65
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 1, figsize=figsize)
self.plot_eig1(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eig2(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eig3(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
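# Illustrative usage sketch (commented out so the module stays importable):
# 'tensor' is an assumed, already initialized Tensor instance and the output
# file name is hypothetical. Calling compute_eig() first is optional, since
# the plot_eig1/2/3 methods invoke it on demand.
#
#     tensor.compute_eig()
#     fig, ax = tensor.plot_eigs(colorbar='bottom', show=False,
#                                fname='eigenvalues.png')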
def plot_eigh1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the first eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eigh1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{h1}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eigh1_label
if self.eigh1 is None:
self.compute_eigh()
return self.eigh1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigh2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the second eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eigh2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{h2}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eigh2_label
if self.eigh1 is None:
self.compute_eigh()
return self.eigh2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eighh(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the maximum absolute value eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eighh([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{hh}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eighh_label
if self.eigh1 is None:
self.compute_eigh()
return self.eighh.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigh(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the two eigenvalues and maximum absolute value eigenvalue of the
horizontal tensor.
Usage
-----
x.plot_eigh([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 2.3
else:
scale = 1.4
else:
scale = 1.65
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 1, figsize=figsize)
self.plot_eigh1(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eigh2(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eighh(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
def to_xarray(self, title='', description='',
comment='pyshtools grid'):
"""
Return all tensor gridded data as an xarray DataSet.
Usage
-----
x.to_xarray([title, description, comment])
Parameters
----------
title : str, optional, default = ''
Title of the dataset.
description : str, optional, default = ''
Description of the dataset ('Remark' in gmt grd files).
comment : str, optional, default = 'pyshtools grid'
Additional information about how the data were generated.
"""
attrs = {'title': title,
'description': description,
'comment': comment,
'nlat': self.nlat,
'nlon': self.nlon,
'lmax': self.lmax,
'lmax_calc': self.lmax_calc,
'sampling': self.sampling,
'grid': self.grid,
'a': self.a,
'f': self.f,
'n': self.n,
'extend': repr(self.extend)
}
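# The global grid and model metadata built above are attached to the returned
# xarray Dataset as attributes; each tensor component (and any computed
# invariant or eigenvalue grid) is added below as its own DataArray.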
if isinstance(self, SHGravTensor):
attrs['gm'] = self.gm
if self.epoch is not None:
attrs['epoch'] = self.epoch
desc = 'gravity tensor component '
else:
if self.year is not None:
attrs['year'] = self.year
desc = 'magnetic field tensor component '
_vxx = self.vxx.to_xarray(title=desc+'(Vxx)', long_name='$V_{xx}$',
units=self._vii_units)
_vxy = self.vxy.to_xarray(title=desc+'(Vxy)', long_name='$V_{xy}$',
units=self._vii_units)
_vxz = self.vxz.to_xarray(title=desc+'(Vxz)', long_name='$V_{xz}$',
units=self._vii_units)
_vyx = self.vyx.to_xarray(title=desc+'(Vyx)', long_name='$V_{yx}$',
units=self._vii_units)
_vyy = self.vyy.to_xarray(title=desc+'(Vyy)', long_name='$V_{yy}$',
units=self._vii_units)
_vyz = self.vyz.to_xarray(title=desc+'(Vyz)', long_name='$V_{yz}$',
units=self._vii_units)
_vzx = self.vzx.to_xarray(title=desc+'(Vzx)', long_name='$V_{zx}$',
units=self._vii_units)
_vzy = self.vzy.to_xarray(title=desc+'(Vzy)', long_name='$V_{zy}$',
units=self._vii_units)
_vzz = self.vzz.to_xarray(title=desc+'(Vzz)', long_name='$V_{zz}$',
units=self._vii_units)
dataset = _xr.Dataset({'vxx': _vxx, 'vxy': _vxy, 'vxz': _vxz,
'vyx': _vyx, 'vyy': _vyy, 'vyz': _vyz,
'vzx': _vzx, 'vzy': _vzy, 'vzz': _vzz},
attrs=attrs)
if self.i0 is not None:
if isinstance(self, SHGravTensor):
desc0 = 'First invariant of the gravity tensor'
desc1 = 'Second invariant of the gravity tensor'
desc2 = 'Third invariant of the gravity tensor'
desc = 'Unitless invariant of the gravity tensor'
else:
desc0 = 'First invariant of the magnetic field tensor'
desc1 = 'Second invariant of the magnetic field tensor'
desc2 = 'Third invariant of the magnetic field tensor'
desc = 'Unitless invariant of the magnetic field tensor'
_i0 = self.i0.to_xarray(title=desc0,
long_name='$I_0$, Tr $V_{ii}$',
units=self._i0_units)
_i1 = self.i1.to_xarray(title=desc1, long_name='$I_1$',
units=self._i1_units)
_i2 = self.i2.to_xarray(title=desc2,
long_name='$I_2$, det $V_{ij}$',
units=self._i2_units)
_i = self.i.to_xarray(title=desc,
long_name='$-(I_2/2)^{2} / ' +
'(I_1/3)^{3}$',
units='none')
dataset['i0'] = _i0
dataset['i1'] = _i1
dataset['i2'] = _i2
dataset['i'] = _i
if self.eig1 is not None:
if isinstance(self, SHGravTensor):
desc1 = 'First eigenvalue of the gravity tensor'
desc2 = 'Second eigenvalue of the gravity tensor'
desc3 = 'Third eigenvalue of the gravity tensor'
else:
desc1 = 'First eigenvalue of the magnetic field tensor'
desc2 = 'Second eigenvalue of the magnetic field tensor'
desc3 = 'Third eigenvalue of the magnetic field tensor'
_eig1 = self.eig1.to_xarray(title=desc1,
long_name='${\lambda}_1$',
units=self._vii_units)
_eig2 = self.eig2.to_xarray(title=desc2,
long_name='${\lambda}_2$',
units=self._vii_units)
_eig3 = self.eig3.to_xarray(title=desc3,
long_name='${\lambda}_3$',
units=self._vii_units)
dataset['eig1'] = _eig1
dataset['eig2'] = _eig2
dataset['eig3'] = _eig3
if self.eighh is not None:
if isinstance(self, SHGravTensor):
desc1 = 'First horizontal eigenvalue of the gravity tensor'
desc2 = 'Second horizontal eigenvalue of the gravity tensor'
desc3 = 'Combined horizontal eigenvalue of the gravity tensor'
else:
desc1 = 'First horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
desc2 = 'Second horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
desc3 = 'Combined horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
_eigh1 = self.eigh1.to_xarray(title=desc1,
long_name='${\lambda}_{h1}$',
units=self._vii_units)
_eigh2 = self.eigh2.to_xarray(title=desc2,
long_name='${\lambda}_{h2}$',
units=self._vii_units)
_eighh = self.eighh.to_xarray(title=desc3,
long_name='${\lambda}_{hh}$',
units=self._vii_units)
dataset['eigh1'] = _eigh1
dataset['eigh2'] = _eigh2
dataset['eighh'] = _eighh
return dataset
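# Illustrative export sketch (commented out): the returned Dataset can be
# written to disk with xarray's standard I/O. 'tensor' is an assumed instance
# and the netCDF file name is hypothetical.
#
#     tensor.compute_eig()                         # include eigenvalue grids
#     ds = tensor.to_xarray(title='tensor grids')
#     ds.to_netcdf('tensor_grids.nc')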
class SHGravTensor(Tensor):
"""
Class for the gravity field tensor and eigenvalues. The class is
initialized from a class instance of SHGravCoeffs using the method
tensor().
Attributes:
vxx, vxy, vxz, : The 9 components of the gravity tensor.
vyx, vyy, vyz,
vzx, vzy, vzz
i0, i1, i2, i : The three invariants of the gravity tensor and a
derived quantity that is bounded between 0 and 1.
These are computed by a call to compute_invar().
eig1, eig2, eig3 : The three eigenvalues of the gravity tensor, which are
computed by a call to compute_eig().
eigh1, eigh2, : The horizontal eigenvalues of the gravity tensor, which
eighh are computed by a call to compute_eigh().
gm : The gravitational constant times the mass of the body.
a : Semimajor axis of the reference ellipsoid.
f : Flattening of the reference ellipsoid, f=(a-b)/a.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the
gravitational potential used in creating the grids.
units : The units of the gridded data.
epoch : The epoch time of the gravity model.
nlat, nlon : The number of latitude and longitude bands in the grids.
n : The number of samples in latitude.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for
360 E and the unnecessary row for 90 S.
Methods:
plot() : Plot all 9 components of the gravity tensor.
plot_vxx() : Plot the vxx component of the gravity tensor.
plot_vxy() : Plot the vxy component of the gravity tensor.
plot_vxz() : Plot the vxz component of the gravity tensor.
plot_vyx() : Plot the vyx component of the gravity tensor.
plot_vyy() : Plot the vyy component of the gravity tensor.
plot_vyz() : Plot the vyz component of the gravity tensor.
plot_vzx() : Plot the vzx component of the gravity tensor.
plot_vzy() : Plot the vzy component of the gravity tensor.
plot_vzz() : Plot the vzz component of the gravity tensor.
compute_invar() : Compute the invariants of the gravity tensor.
plot_i0() : Plot the first invariant I0 of the gravity tensor.
plot_i1() : Plot the second invariant I1 of the gravity tensor.
plot_i2() : Plot the third invariant I2 of the gravity tensor.
plot_i() : Plot the derived quantity I = -(I2/2)**2 / (I1/3)**3.
compute_eig() : Compute the three eigenvalues of the gravity tensor.
plot_eig() : Plot the three eigenvalues of the gravity tensor.
plot_eig1() : Plot the first eigenvalue of the gravity tensor.
plot_eig2() : Plot the second eigenvalue of the gravity tensor.
plot_eig3() : Plot the third eigenvalue of the gravity tensor.
compute_eigh() : Compute the horizontal eigenvalues of the gravity tensor.
plot_eigh() : Plot the two horizontal eigenvalues and the combined
maximum absolute eigenvalue of the gravity tensor.
plot_eigh1() : Plot the first horizontal eigenvalue of the gravity
tensor.
plot_eigh2() : Plot the second horizontal eigenvalue of the gravity
tensor.
plot_eighh() : Plot the combined maximum absolute eigenvalue of the
gravity tensor.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHGravTensor
instance.
"""
def __init__(self, vxx, vyy, vzz, vxy, vxz, vyz, gm, a, f, lmax,
lmax_calc, units='Eötvös', epoch=None):
"""
Initialize the SHGravTensor class.
"""
self.vxx = _SHGrid.from_array(vxx, grid='DH', units=units)
self.vyy = _SHGrid.from_array(vyy, grid='DH', units=units)
self.vzz = _SHGrid.from_array(vzz, grid='DH', units=units)
self.vxy = _SHGrid.from_array(vxy, grid='DH', units=units)
self.vxz = _SHGrid.from_array(vxz, grid='DH', units=units)
self.vyz = _SHGrid.from_array(vyz, grid='DH', units=units)
self.vyx = self.vxy
self.vzx = self.vxz
self.vzy = self.vyz
self.grid = self.vxx.grid
self.sampling = self.vxx.sampling
self.nlat = self.vxx.nlat
self.nlon = self.vxx.nlon
self.n = self.vxx.n
self.extend = self.vxx.extend
self.gm = gm
self.a = a
self.f = f
self.lmax = lmax
self.lmax_calc = lmax_calc
self.i0 = None
self.i1 = None
self.i2 = None
self.i = None
self.eig1 = None
self.eig2 = None
self.eig3 = None
self.eigh1 = None
self.eigh2 = None
self.eighh = None
self.units = units
self.epoch = epoch
self._vxx_label = '$V_{xx}$, ' + self.units
self._vxy_label = '$V_{xy}$, ' + self.units
self._vxz_label = '$V_{xz}$, ' + self.units
self._vyx_label = '$V_{yx}$, ' + self.units
self._vyy_label = '$V_{yy}$, ' + self.units
self._vyz_label = '$V_{yz}$, ' + self.units
self._vzx_label = '$V_{zx}$, ' + self.units
self._vzy_label = '$V_{zy}$, ' + self.units
self._vzz_label = '$V_{zz}$, ' + self.units
self._i0_label = 'Tr $V_{ii}$, ' + self.units
self._i1_label = '$I_1$, ' + self.units + '$^2$'
self._i2_label = 'det $V_{ij}$, ' + self.units + '$^3$'
self._i_label = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
self._eig1_label = '$\lambda_1$, ' + self.units
self._eig2_label = '$\lambda_2$, ' + self.units
self._eig3_label = '$\lambda_3$, ' + self.units
self._eigh1_label = '$\lambda_{h1}$, ' + self.units
self._eigh2_label = '$\lambda_{h2}$, ' + self.units
self._eighh_label = '$\lambda_{hh}$, ' + self.units
def __repr__(self):
str = ('grid = {:s}\n'
'nlat = {:d}\n'
'nlon = {:d}\n'
'n = {:d}\n'
'sampling = {:d}\n'
'extend = {}\n'
'lmax = {:d}\n'
'lmax_calc = {:d}\n'
'gm (m3 / s2) = {:e}\n'
'a (m) = {:e}\n'
'f = {:e}\n'
'units = {:s}\n'
'epoch = {:s}'
.format(self.grid, self.nlat, self.nlon, self.n, self.sampling,
self.extend, self.lmax, self.lmax_calc, self.gm, self.a,
self.f, repr(self.units), repr(self.epoch)))
return str
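# Typical construction path (illustrative sketch, commented out). Per the class
# docstring, instances are created from SHGravCoeffs via its tensor() method;
# the import alias and the elided input arguments are assumptions.
#
#     import pyshtools as pysh
#     clm = pysh.SHGravCoeffs.from_file(...)   # any SHGravCoeffs instance works
#     tensor = clm.tensor()
#     tensor.plot_eigs(show=False, fname='grav_eigs.png')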
class SHMagTensor(Tensor):
"""
Class for the magnetic field tensor and eigenvalues. The class is
initialized from a class instance of SHMagCoeffs using the method
tensor().
Attributes:
vxx, vxy, vxz, : The 9 components of the magnetic field tensor.
vyx, vyy, vyz,
vzx, vzy, vzz
i0, i1, i2, i : The three invariants of the magnetic field tensor and a
derived quantity that is bounded between 0 and 1.
eig1, eig2, eig3 : The three eigenvalues of the magnetic field tensor,
which are computed by a call to compute_eig().
eigh1, eigh2, : The horizontal eigenvalues of the magnetic field
eighh tensor, which are computed by a call to compute_eigh().
a : Semimajor axis of the reference ellipsoid.
f : Flattening of the reference ellipsoid, f=(a-b)/a.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the
magnetic potential used in creating the grids.
units : The units of the gridded data.
year : The year of the time-variable magnetic field data.
nlat, nlon : The number of latitude and longitude bands in the grids.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for
360 E and the unnecessary row for 90 S.
Methods:
plot() : Plot all 9 components of the magnetic field tensor.
plot_vxx() : Plot the vxx component of the magnetic field tensor.
plot_vxy() : Plot the vxy component of the magnetic field tensor.
plot_vxz() : Plot the vxz component of the magnetic field tensor.
plot_vyx() : Plot the vyx component of the magnetic field tensor.
plot_vyy() : Plot the vyy component of the magnetic field tensor.
plot_vyz() : Plot the vyz component of the magnetic field tensor.
plot_vzx() : Plot the vzx component of the magnetic field tensor.
plot_vzy() : Plot the vzy component of the magnetic field tensor.
plot_vzz() : Plot the vzz component of the magnetic field tensor.
compute_invar() : Compute the invariants of the magnetic field tensor.
plot_i0() : Plot the first invariant I0 of the magnetic field tensor.
plot_i1() : Plot the second invariant I1 of the magnetic field tensor.
plot_i2() : Plot the third invariant I2 of the magnetic field tensor.
plot_i() : Plot the derived quantity I = -(I2/2)**2 / (I1/3)**3.
compute_eig() : Compute the three eigenvalues of the magnetic field
tensor.
plot_eig() : Plot the three eigenvalues of the magnetic field tensor.
plot_eig1() : Plot the first eigenvalue of the magnetic field tensor.
plot_eig2() : Plot the second eigenvalue of the magnetic field tensor.
plot_eig3() : Plot the third eigenvalue of the magnetic field tensor.
compute_eigh() : Compute the horizontal eigenvalues of the magnetic field
tensor.
plot_eigh() : Plot the two horizontal eigenvalues and the combined
maximum absolute eigenvalue of the magnetic field tensor.
plot_eigh1() : Plot the first horizontal eigenvalue of the magnetic
field tensor.
plot_eigh2() : Plot the second horizontal eigenvalue of the magnetic
field tensor.
plot_eighh() : Plot the combined maximum absolute eigenvalue of the
magnetic field tensor.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHMagTensor
instance.
"""
def __init__(self, vxx, vyy, vzz, vxy, vxz, vyz, a, f, lmax,
lmax_calc, units=None, year=None):
"""
Initialize the SHMagTensor class.
"""
self.vxx = _SHGrid.from_array(vxx, grid='DH', units=units)
self.vyy = _SHGrid.from_array(vyy, grid='DH', units=units)
self.vzz = _SHGrid.from_array(vzz, grid='DH', units=units)
self.vxy = _SHGrid.from_array(vxy, grid='DH', units=units)
self.vxz = _SHGrid.from_array(vxz, grid='DH', units=units)
self.vyz = _SHGrid.from_array(vyz, grid='DH', units=units)
self.vyx = self.vxy
self.vzx = self.vxz
self.vzy = self.vyz
self.grid = self.vxx.grid
self.sampling = self.vxx.sampling
self.nlat = self.vxx.nlat
self.nlon = self.vxx.nlon
self.n = self.vxx.n
self.extend = self.vxx.extend
self.a = a
self.f = f
self.lmax = lmax
self.lmax_calc = lmax_calc
self.i0 = None
self.i1 = None
self.i2 = None
self.i = None
self.eig1 = None
self.eig2 = None
self.eig3 = None
self.eigh1 = None
self.eigh2 = None
self.eighh = None
self.units = units
self.year = year
if self.units.lower() == 'nt/m':
self._units_formatted = 'nT m$^{-1}$'
self._i1_units = 'nT$^2$ m$^{-2}$'
self._i2_units = 'nT$^3$ m$^{-3}$'
else:
self._units_formatted = 'T m$^{-1}$'
self._i1_units = 'T$^2$ m$^{-2}$'
self._i2_units = 'T$^3$ m$^{-3}$'
self._vxx_label = '$V_{xx}$, ' + self._units_formatted
self._vxy_label = '$V_{xy}$, ' + self._units_formatted
self._vxz_label = '$V_{xz}$, ' + self._units_formatted
self._vyx_label = '$V_{yx}$, ' + self._units_formatted
self._vyy_label = '$V_{yy}$, ' + self._units_formatted
self._vyz_label = '$V_{yz}$, ' + self._units_formatted
self._vzx_label = '$V_{zx}$, ' + self._units_formatted
self._vzy_label = '$V_{zy}$, ' + self._units_formatted
self._vzz_label = '$V_{zz}$, ' + self._units_formatted
self._i0_label = 'Tr $V_{ii}$, ' + self._units_formatted
self._i1_label = '$I_1$, ' + self._i1_units
self._i2_label = 'det $V_{ij}$, ' + self._i2_units
self._i_label = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
self._eig1_label = '$\lambda_1$, ' + self._units_formatted
self._eig2_label = '$\lambda_2$, ' + self._units_formatted
self._eig3_label = '$\lambda_3$, ' + self._units_formatted
self._eigh1_label = '$\lambda_{h1}$, ' + self._units_formatted
self._eigh2_label = '$\lambda_{h2}$, ' + self._units_formatted
self._eighh_label = '$\lambda_{hh}$, ' + self._units_formatted
def __repr__(self):
str = ('grid = {:s}\n'
'nlat = {:d}\n'
'nlon = {:d}\n'
'n = {:d}\n'
'sampling = {:d}\n'
'extend = {}\n'
'lmax = {:d}\n'
'lmax_calc = {:d}\n'
'a (m) = {:e}\n'
'f = {:e}\n'
'units = {:s}\n'
'year = {:s}'
.format(self.grid, self.nlat, self.nlon, self.n, self.sampling,
self.extend, self.lmax, self.lmax_calc, self.a,
self.f, repr(self.units), repr(self.year)))
return str
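# Invariants workflow sketch (illustrative, commented out; 'mag_tensor' is an
# assumed SHMagTensor instance). compute_invar() fills i0, i1, i2 and i, after
# which to_xarray() includes the invariant grids automatically.
#
#     mag_tensor.compute_invar()
#     mag_tensor.plot_i()
#     ds = mag_tensor.to_xarray()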
| bsd-3-clause |
sodafree/backend | build/ipython/IPython/parallel/tests/test_view.py | 3 | 21761 | # -*- coding: utf-8 -*-
"""test View objects
Authors:
* Min RK
"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import sys
import platform
import time
from tempfile import mktemp
from StringIO import StringIO
import zmq
from nose import SkipTest
from IPython.testing import decorators as dec
from IPython.testing.ipunittest import ParametricTestCase
from IPython.utils.io import capture_output
from IPython import parallel as pmod
from IPython.parallel import error
from IPython.parallel import AsyncResult, AsyncHubResult, AsyncMapResult
from IPython.parallel import DirectView
from IPython.parallel.util import interactive
from IPython.parallel.tests import add_engines
from .clienttest import ClusterTestCase, crash, wait, skip_without
def setup():
add_engines(3, total=True)
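# These view tests assume a running IPython.parallel cluster for the 'iptest'
# profile (a Client is created with profile='iptest' further down); setup()
# brings the registered engine count to three in total before the tests run.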
class TestView(ClusterTestCase, ParametricTestCase):
def setUp(self):
# On Win XP, wait for resource cleanup, else parallel test group fails
if platform.system() == "Windows" and platform.win32_ver()[0] == "XP":
# 1 sec fails. 1.5 sec seems ok. Using 2 sec for margin of safety
time.sleep(2)
super(TestView, self).setUp()
def test_z_crash_mux(self):
"""test graceful handling of engine death (direct)"""
raise SkipTest("crash tests disabled, due to undesirable crash reports")
# self.add_engines(1)
eid = self.client.ids[-1]
ar = self.client[eid].apply_async(crash)
self.assertRaisesRemote(error.EngineError, ar.get, 10)
eid = ar.engine_id
tic = time.time()
while eid in self.client.ids and time.time()-tic < 5:
time.sleep(.01)
self.client.spin()
self.assertFalse(eid in self.client.ids, "Engine should have died")
def test_push_pull(self):
"""test pushing and pulling"""
data = dict(a=10, b=1.05, c=range(10), d={'e':(1,2),'f':'hi'})
t = self.client.ids[-1]
v = self.client[t]
push = v.push
pull = v.pull
v.block=True
nengines = len(self.client)
push({'data':data})
d = pull('data')
self.assertEquals(d, data)
self.client[:].push({'data':data})
d = self.client[:].pull('data', block=True)
self.assertEquals(d, nengines*[data])
ar = push({'data':data}, block=False)
self.assertTrue(isinstance(ar, AsyncResult))
r = ar.get()
ar = self.client[:].pull('data', block=False)
self.assertTrue(isinstance(ar, AsyncResult))
r = ar.get()
self.assertEquals(r, nengines*[data])
self.client[:].push(dict(a=10,b=20))
r = self.client[:].pull(('a','b'), block=True)
self.assertEquals(r, nengines*[[10,20]])
def test_push_pull_function(self):
"test pushing and pulling functions"
def testf(x):
return 2.0*x
t = self.client.ids[-1]
v = self.client[t]
v.block=True
push = v.push
pull = v.pull
execute = v.execute
push({'testf':testf})
r = pull('testf')
self.assertEqual(r(1.0), testf(1.0))
execute('r = testf(10)')
r = pull('r')
self.assertEquals(r, testf(10))
ar = self.client[:].push({'testf':testf}, block=False)
ar.get()
ar = self.client[:].pull('testf', block=False)
rlist = ar.get()
for r in rlist:
self.assertEqual(r(1.0), testf(1.0))
execute("def g(x): return x*x")
r = pull(('testf','g'))
self.assertEquals((r[0](10),r[1](10)), (testf(10), 100))
def test_push_function_globals(self):
"""test that pushed functions have access to globals"""
@interactive
def geta():
return a
# self.add_engines(1)
v = self.client[-1]
v.block=True
v['f'] = geta
self.assertRaisesRemote(NameError, v.execute, 'b=f()')
v.execute('a=5')
v.execute('b=f()')
self.assertEquals(v['b'], 5)
def test_push_function_defaults(self):
"""test that pushed functions preserve default args"""
def echo(a=10):
return a
v = self.client[-1]
v.block=True
v['f'] = echo
v.execute('b=f()')
self.assertEquals(v['b'], 10)
def test_get_result(self):
"""test getting results from the Hub."""
c = pmod.Client(profile='iptest')
# self.add_engines(1)
t = c.ids[-1]
v = c[t]
v2 = self.client[t]
ar = v.apply_async(wait, 1)
# give the monitor time to notice the message
time.sleep(.25)
ahr = v2.get_result(ar.msg_ids)
self.assertTrue(isinstance(ahr, AsyncHubResult))
self.assertEquals(ahr.get(), ar.get())
ar2 = v2.get_result(ar.msg_ids)
self.assertFalse(isinstance(ar2, AsyncHubResult))
c.spin()
c.close()
def test_run_newline(self):
"""test that run appends newline to files"""
tmpfile = mktemp()
with open(tmpfile, 'w') as f:
f.write("""def g():
return 5
""")
v = self.client[-1]
v.run(tmpfile, block=True)
self.assertEquals(v.apply_sync(lambda f: f(), pmod.Reference('g')), 5)
def test_apply_tracked(self):
"""test tracking for apply"""
# self.add_engines(1)
t = self.client.ids[-1]
v = self.client[t]
v.block=False
def echo(n=1024*1024, **kwargs):
with v.temp_flags(**kwargs):
return v.apply(lambda x: x, 'x'*n)
ar = echo(1, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = echo(track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertEquals(ar.sent, ar._tracker.done)
ar._tracker.wait()
self.assertTrue(ar.sent)
def test_push_tracked(self):
t = self.client.ids[-1]
ns = dict(x='x'*1024*1024)
v = self.client[t]
ar = v.push(ns, block=False, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = v.push(ns, block=False, track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
ar._tracker.wait()
self.assertEquals(ar.sent, ar._tracker.done)
self.assertTrue(ar.sent)
ar.get()
def test_scatter_tracked(self):
t = self.client.ids
x='x'*1024*1024
ar = self.client[t].scatter('x', x, block=False, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = self.client[t].scatter('x', x, block=False, track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertEquals(ar.sent, ar._tracker.done)
ar._tracker.wait()
self.assertTrue(ar.sent)
ar.get()
def test_remote_reference(self):
v = self.client[-1]
v['a'] = 123
ra = pmod.Reference('a')
b = v.apply_sync(lambda x: x, ra)
self.assertEquals(b, 123)
def test_scatter_gather(self):
view = self.client[:]
seq1 = range(16)
view.scatter('a', seq1)
seq2 = view.gather('a', block=True)
self.assertEquals(seq2, seq1)
self.assertRaisesRemote(NameError, view.gather, 'asdf', block=True)
@skip_without('numpy')
def test_scatter_gather_numpy(self):
import numpy
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
view = self.client[:]
a = numpy.arange(64)
view.scatter('a', a)
b = view.gather('a', block=True)
assert_array_equal(b, a)
def test_scatter_gather_lazy(self):
"""scatter/gather with targets='all'"""
view = self.client.direct_view(targets='all')
x = range(64)
view.scatter('x', x)
gathered = view.gather('x', block=True)
self.assertEquals(gathered, x)
@dec.known_failure_py3
@skip_without('numpy')
def test_push_numpy_nocopy(self):
import numpy
view = self.client[:]
a = numpy.arange(64)
view['A'] = a
@interactive
def check_writeable(x):
return x.flags.writeable
for flag in view.apply_sync(check_writeable, pmod.Reference('A')):
self.assertFalse(flag, "array is writeable, push shouldn't have pickled it")
view.push(dict(B=a))
for flag in view.apply_sync(check_writeable, pmod.Reference('B')):
self.assertFalse(flag, "array is writeable, push shouldn't have pickled it")
@skip_without('numpy')
def test_apply_numpy(self):
"""view.apply(f, ndarray)"""
import numpy
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
A = numpy.random.random((100,100))
view = self.client[-1]
for dt in [ 'int32', 'uint8', 'float32', 'float64' ]:
B = A.astype(dt)
C = view.apply_sync(lambda x:x, B)
assert_array_equal(B,C)
@skip_without('numpy')
def test_push_pull_recarray(self):
"""push/pull recarrays"""
import numpy
from numpy.testing.utils import assert_array_equal
view = self.client[-1]
R = numpy.array([
(1, 'hi', 0.),
(2**30, 'there', 2.5),
(-99999, 'world', -12345.6789),
], [('n', int), ('s', '|S10'), ('f', float)])
view['RR'] = R
R2 = view['RR']
r_dtype, r_shape = view.apply_sync(interactive(lambda : (RR.dtype, RR.shape)))
self.assertEquals(r_dtype, R.dtype)
self.assertEquals(r_shape, R.shape)
self.assertEquals(R2.dtype, R.dtype)
self.assertEquals(R2.shape, R.shape)
assert_array_equal(R2, R)
def test_map(self):
view = self.client[:]
def f(x):
return x**2
data = range(16)
r = view.map_sync(f, data)
self.assertEquals(r, map(f, data))
def test_map_iterable(self):
"""test map on iterables (direct)"""
view = self.client[:]
# 101 is prime, so it won't be evenly distributed
arr = range(101)
# ensure it will be an iterator, even in Python 3
it = iter(arr)
r = view.map_sync(lambda x:x, arr)
self.assertEquals(r, list(arr))
def test_scatterGatherNonblocking(self):
data = range(16)
view = self.client[:]
view.scatter('a', data, block=False)
ar = view.gather('a', block=False)
self.assertEquals(ar.get(), data)
@skip_without('numpy')
def test_scatter_gather_numpy_nonblocking(self):
import numpy
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
a = numpy.arange(64)
view = self.client[:]
ar = view.scatter('a', a, block=False)
self.assertTrue(isinstance(ar, AsyncResult))
amr = view.gather('a', block=False)
self.assertTrue(isinstance(amr, AsyncMapResult))
assert_array_equal(amr.get(), a)
def test_execute(self):
view = self.client[:]
# self.client.debug=True
execute = view.execute
ar = execute('c=30', block=False)
self.assertTrue(isinstance(ar, AsyncResult))
ar = execute('d=[0,1,2]', block=False)
self.client.wait(ar, 1)
self.assertEquals(len(ar.get()), len(self.client))
for c in view['c']:
self.assertEquals(c, 30)
def test_abort(self):
view = self.client[-1]
ar = view.execute('import time; time.sleep(1)', block=False)
ar2 = view.apply_async(lambda : 2)
ar3 = view.apply_async(lambda : 3)
view.abort(ar2)
view.abort(ar3.msg_ids)
self.assertRaises(error.TaskAborted, ar2.get)
self.assertRaises(error.TaskAborted, ar3.get)
def test_abort_all(self):
"""view.abort() aborts all outstanding tasks"""
view = self.client[-1]
ars = [ view.apply_async(time.sleep, 0.25) for i in range(10) ]
view.abort()
view.wait(timeout=5)
for ar in ars[5:]:
self.assertRaises(error.TaskAborted, ar.get)
def test_temp_flags(self):
view = self.client[-1]
view.block=True
with view.temp_flags(block=False):
self.assertFalse(view.block)
self.assertTrue(view.block)
@dec.known_failure_py3
def test_importer(self):
view = self.client[-1]
view.clear(block=True)
with view.importer:
import re
@interactive
def findall(pat, s):
# this globals() step isn't necessary in real code
# only to prevent a closure in the test
re = globals()['re']
return re.findall(pat, s)
self.assertEquals(view.apply_sync(findall, '\w+', 'hello world'), 'hello world'.split())
def test_unicode_execute(self):
"""test executing unicode strings"""
v = self.client[-1]
v.block=True
if sys.version_info[0] >= 3:
code="a='é'"
else:
code=u"a=u'é'"
v.execute(code)
self.assertEquals(v['a'], u'é')
def test_unicode_apply_result(self):
"""test unicode apply results"""
v = self.client[-1]
r = v.apply_sync(lambda : u'é')
self.assertEquals(r, u'é')
def test_unicode_apply_arg(self):
"""test passing unicode arguments to apply"""
v = self.client[-1]
@interactive
def check_unicode(a, check):
assert isinstance(a, unicode), "%r is not unicode"%a
assert isinstance(check, bytes), "%r is not bytes"%check
assert a.encode('utf8') == check, "%s != %s"%(a,check)
for s in [ u'é', u'ßø®∫',u'asdf' ]:
try:
v.apply_sync(check_unicode, s, s.encode('utf8'))
except error.RemoteError as e:
if e.ename == 'AssertionError':
self.fail(e.evalue)
else:
raise e
def test_map_reference(self):
"""view.map(<Reference>, *seqs) should work"""
v = self.client[:]
v.scatter('n', self.client.ids, flatten=True)
v.execute("f = lambda x,y: x*y")
rf = pmod.Reference('f')
nlist = list(range(10))
mlist = nlist[::-1]
expected = [ m*n for m,n in zip(mlist, nlist) ]
result = v.map_sync(rf, mlist, nlist)
self.assertEquals(result, expected)
def test_apply_reference(self):
"""view.apply(<Reference>, *args) should work"""
v = self.client[:]
v.scatter('n', self.client.ids, flatten=True)
v.execute("f = lambda x: n*x")
rf = pmod.Reference('f')
result = v.apply_sync(rf, 5)
expected = [ 5*id for id in self.client.ids ]
self.assertEquals(result, expected)
def test_eval_reference(self):
v = self.client[self.client.ids[0]]
v['g'] = range(5)
rg = pmod.Reference('g[0]')
echo = lambda x:x
self.assertEquals(v.apply_sync(echo, rg), 0)
def test_reference_nameerror(self):
v = self.client[self.client.ids[0]]
r = pmod.Reference('elvis_has_left')
echo = lambda x:x
self.assertRaisesRemote(NameError, v.apply_sync, echo, r)
def test_single_engine_map(self):
e0 = self.client[self.client.ids[0]]
r = range(5)
check = [ -1*i for i in r ]
result = e0.map_sync(lambda x: -1*x, r)
self.assertEquals(result, check)
def test_len(self):
"""len(view) makes sense"""
e0 = self.client[self.client.ids[0]]
yield self.assertEquals(len(e0), 1)
v = self.client[:]
yield self.assertEquals(len(v), len(self.client.ids))
v = self.client.direct_view('all')
yield self.assertEquals(len(v), len(self.client.ids))
v = self.client[:2]
yield self.assertEquals(len(v), 2)
v = self.client[:1]
yield self.assertEquals(len(v), 1)
v = self.client.load_balanced_view()
yield self.assertEquals(len(v), len(self.client.ids))
# parametric tests seem to require manual closing?
self.client.close()
# begin execute tests
def test_execute_reply(self):
e0 = self.client[self.client.ids[0]]
e0.block = True
ar = e0.execute("5", silent=False)
er = ar.get()
self.assertEquals(str(er), "<ExecuteReply[%i]: 5>" % er.execution_count)
self.assertEquals(er.pyout['data']['text/plain'], '5')
def test_execute_reply_stdout(self):
e0 = self.client[self.client.ids[0]]
e0.block = True
ar = e0.execute("print (5)", silent=False)
er = ar.get()
self.assertEquals(er.stdout.strip(), '5')
def test_execute_pyout(self):
"""execute triggers pyout with silent=False"""
view = self.client[:]
ar = view.execute("5", silent=False, block=True)
expected = [{'text/plain' : '5'}] * len(view)
mimes = [ out['data'] for out in ar.pyout ]
self.assertEquals(mimes, expected)
def test_execute_silent(self):
"""execute does not trigger pyout with silent=True"""
view = self.client[:]
ar = view.execute("5", block=True)
expected = [None] * len(view)
self.assertEquals(ar.pyout, expected)
def test_execute_magic(self):
"""execute accepts IPython commands"""
view = self.client[:]
view.execute("a = 5")
ar = view.execute("%whos", block=True)
# this will raise, if that failed
ar.get(5)
for stdout in ar.stdout:
lines = stdout.splitlines()
self.assertEquals(lines[0].split(), ['Variable', 'Type', 'Data/Info'])
found = False
for line in lines[2:]:
split = line.split()
if split == ['a', 'int', '5']:
found = True
break
self.assertTrue(found, "whos output wrong: %s" % stdout)
def test_execute_displaypub(self):
"""execute tracks display_pub output"""
view = self.client[:]
view.execute("from IPython.core.display import *")
ar = view.execute("[ display(i) for i in range(5) ]", block=True)
expected = [ {u'text/plain' : unicode(j)} for j in range(5) ]
for outputs in ar.outputs:
mimes = [ out['data'] for out in outputs ]
self.assertEquals(mimes, expected)
def test_apply_displaypub(self):
"""apply tracks display_pub output"""
view = self.client[:]
view.execute("from IPython.core.display import *")
@interactive
def publish():
[ display(i) for i in range(5) ]
ar = view.apply_async(publish)
ar.get(5)
expected = [ {u'text/plain' : unicode(j)} for j in range(5) ]
for outputs in ar.outputs:
mimes = [ out['data'] for out in outputs ]
self.assertEquals(mimes, expected)
def test_execute_raises(self):
"""exceptions in execute requests raise appropriately"""
view = self.client[-1]
ar = view.execute("1/0")
self.assertRaisesRemote(ZeroDivisionError, ar.get, 2)
def test_remoteerror_render_exception(self):
"""RemoteErrors get nice tracebacks"""
view = self.client[-1]
ar = view.execute("1/0")
ip = get_ipython()
ip.user_ns['ar'] = ar
with capture_output() as io:
ip.run_cell("ar.get(2)")
self.assertTrue('ZeroDivisionError' in io.stdout, io.stdout)
def test_compositeerror_render_exception(self):
"""CompositeErrors get nice tracebacks"""
view = self.client[:]
ar = view.execute("1/0")
ip = get_ipython()
ip.user_ns['ar'] = ar
with capture_output() as io:
ip.run_cell("ar.get(2)")
self.assertEqual(io.stdout.count('ZeroDivisionError'), len(view) * 2, io.stdout)
self.assertEqual(io.stdout.count('by zero'), len(view), io.stdout)
self.assertEqual(io.stdout.count(':execute'), len(view), io.stdout)
@dec.skipif_not_matplotlib
def test_magic_pylab(self):
"""%pylab works on engines"""
view = self.client[-1]
ar = view.execute("%pylab inline")
# at least check if this raised:
reply = ar.get(5)
# include imports, in case user config
ar = view.execute("plot(rand(100))", silent=False)
reply = ar.get(5)
self.assertEquals(len(reply.outputs), 1)
output = reply.outputs[0]
self.assertTrue("data" in output)
data = output['data']
self.assertTrue("image/png" in data)
| bsd-3-clause |
liebermeister/flux-enzyme-cost-minimization | scripts/export_data_for_machine_learning.py | 1 | 4557 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 10:38:03 2015
@author: noore
"""
import zipfile, os
import pandas as pd
import definitions as D
TSNE_DIM_1 = 't-SNE dim1'
TSNE_DIM_2 = 't-SNE dim2'
if __name__ == '__main__':
figure_data = D.get_figure_data()
for fig_name in ['monod_glucose_aero', 'monod_glucose_anae']:
zip_fname = D.DATA_FILES[fig_name][0][0]
prefix, ext = os.path.splitext(os.path.basename(zip_fname))
with zipfile.ZipFile(zip_fname, 'r') as z:
rates_df = pd.read_csv(z.open('%s/rates.csv' % prefix, 'r'),
header=0, index_col=0)
stoich_df = pd.read_csv(z.open('%s/stoich.csv' % prefix, 'r'),
header=None, index_col=None)
kcat_df = pd.read_csv(z.open('%s/kcats.csv' % prefix, 'r'),
header=None, index_col=None)
#%%
efms = list(rates_df.index)
reactions = list(rates_df.columns)
rates_df.index.name = 'efm'
rates_df['efm'] = efms
# convert rates_df to an SQL-style DataFrame,
# where the columns are 'efm', 'reaction', 'rate'
# and remove all cases where the rate is 0
melted_rates_df = pd.melt(rates_df, id_vars=['efm'])
melted_rates_df.rename(columns={'variable': 'reaction', 'value': 'rate'}, inplace=True)
melted_rates_df = melted_rates_df[melted_rates_df['rate'] != 0]
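        # A minimal illustration of the melt step above (values are hypothetical,
        # not taken from a real rates.csv): a wide frame like
        #     efm  R1   R2
        #     1    0.5  0.0
        # becomes, after pd.melt(..., id_vars=['efm']), the renaming and the
        # rate != 0 filter, the single long-format row
        #     efm  reaction  rate
        #     1    R1        0.5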
        # stoich_df is already in SQL-style
stoich_df.rename(columns={0: 'reaction', 1: 'metabolite', 2: 'coeff'}, inplace=True)
        # kcat_df is already in SQL-style
kcat_df.rename(columns={0: 'reaction', 1: 'kcat'}, inplace=True)
kcat_df.set_index('reaction', inplace=True)
# calculate degree of each metabolite using GROUP BY
met_degree = stoich_df.groupby('metabolite').count()[['reaction']]
met_degree.rename(columns={'reaction': 'degree'}, inplace=True)
traindata_x = rates_df[reactions].copy()
traindata_x.rename(columns=dict(zip(reactions, map(lambda s: s + ' rate', reactions))), inplace=True)
# count the number of active reactions in each EFM
traindata_x['# of reactions'] = melted_rates_df.groupby('efm').count()['reaction']
traindata_x['sum of all rates'] = rates_df.sum(1)
# calculate the number of metabolites participating in each EFM (i.e.
# appearing as a substrate or product in at least one active reaction)
efm_met_pairs = pd.merge(melted_rates_df, stoich_df, on='reaction')[['efm', 'metabolite']].drop_duplicates()
traindata_x['# of metabolites'] = efm_met_pairs.groupby('efm').count()['metabolite']
# count separately the active metabolites according to their degree
efm_degree_pairs = efm_met_pairs.join(met_degree, on='metabolite')[['efm', 'degree']]
traindata_x['# of metabolites with degree 1'] = efm_degree_pairs[efm_degree_pairs['degree'] == 1].groupby('efm').count()['degree']
traindata_x['# of metabolites with degree 2'] = efm_degree_pairs[efm_degree_pairs['degree'] == 2].groupby('efm').count()['degree']
traindata_x['# of metabolites with degree >=3'] = efm_degree_pairs[efm_degree_pairs['degree'] >= 3].groupby('efm').count()['degree']
# add the rate/kcat of all reactions and the total
rates_over_kcat = rates_df[reactions].divide(kcat_df.loc[reactions, 'kcat'].transpose())
rates_over_kcat.rename(columns=dict(zip(reactions, map(lambda s: s + ' rate/kcat', reactions))), inplace=True)
rates_over_kcat['total rate/kcat'] = rates_over_kcat.sum(1)
traindata_x = traindata_x.join(rates_over_kcat)
traindata_x.to_csv(os.path.join(D.OUTPUT_DIR, '%s_features.csv' % fig_name))
#%%
# prepare the data for learning, i.e. the enzyme costs of each EFM in several conditions
# specifically in units of kD(protein) / (kD(biomass) / hour)
traindata_y = figure_data[fig_name].reset_index()
traindata_y = pd.melt(traindata_y, id_vars='efm',
var_name='external glucose conc. [mM]',
value_name='growth rate [1/h]')
traindata_y.sort_values(by=['efm', 'external glucose conc. [mM]'], inplace=True)
traindata_y.to_csv(os.path.join(D.OUTPUT_DIR, '%s_enzyme_cost.csv' % fig_name), index=False)
| gpl-2.0 |
tacaswell/dataportal | dataportal/muxer/data_muxer.py | 1 | 38457 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import namedtuple, deque
import logging
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
import pandas.core.groupby # to get custom exception
logger = logging.getLogger(__name__)
__all__ = ['DataMuxer', 'dataframe_to_dict']
class BinningError(Exception):
"""
An exception to raise if there are insufficient sampling rules to
upsampling or downsample a data column into specified bins.
"""
pass
class BadDownsamplerError(Exception):
"""
An exception to raise if a downsampler produces unexpected output.
"""
pass
class ColSpec(namedtuple(
'ColSpec', ['name', 'ndim', 'shape', 'upsample', 'downsample'])):
"""
Named-tuple sub-class to validate the column specifications for the
DataMuxer
Parameters
----------
name : hashable
ndim : uint
Dimensionality of the data stored in the column
shape : tuple or None
like ndarray.shape, where 0 or None are scalar
upsample : {None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'ffill', 'bfill'}
None means that each time bin must have at least one value.
The names refer to kinds of scipy.interpolator. See documentation
link below.
downsample : None or a function
None if the data cannot be downsampled (reduced). Otherwise,
any callable that reduces multiple data points (of whatever dimension)
to a single data point.
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
# These reflect the 'method' argument of pandas.DataFrame.fillna
upsampling_methods = {'None', 'linear', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'ffill', 'bfill'}
downsampling_methods = {'None', 'last', 'first', 'median', 'mean', 'sum',
'min', 'max'}
_downsample_mapping = {'last': lambda x: x[-1],
'first': lambda x: x[0],
# new in np 1.9
'median': lambda x: np.median(x, 0),
'mean': lambda x: np.mean(x, 0),
'sum': lambda x: np.sum(x, 0),
'min': lambda x: np.min(x, 0),
'max': lambda x: np.max(x, 0)}
__slots__ = ()
def __new__(cls, name, ndim, shape, upsample, downsample):
# Validations
upsample = _validate_upsample(upsample)
downsample = _validate_downsample(downsample)
if int(ndim) < 0:
raise ValueError("ndim must be positive not {}".format(ndim))
if shape is not None:
shape = tuple(shape)
return super(ColSpec, cls).__new__(
cls, name, int(ndim), shape, upsample, downsample)
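# A minimal sketch of constructing a ColSpec by hand (the source name below is
# made up; in practice DataMuxer builds these from Event Descriptors):
#     spec = ColSpec('motor', 0, None, 'linear', 'mean')
# __new__ validates the sampling rules, so something like
# ColSpec('motor', 0, None, 'bogus', 'mean') would raise a ValueError.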
def _validate_upsample(input):
# TODO The upsampling method could be any callable.
if input is None or input == 'None':
return 'None'
if not (input in ColSpec.upsampling_methods):
raise ValueError("{} is not a valid upsampling method. It "
"must be one of {}".format(
input, ColSpec.upsampling_methods))
return input.lower()
def _validate_downsample(input):
# TODO The downsampling methods could have string aliases like 'mean'.
if (input is not None) and (not (callable(input) or
input in ColSpec.downsampling_methods)):
raise ValueError("The downsampling method must be a callable, None, "
"or one of {}.".format(ColSpec.downsampling_methods))
if input is None:
return 'None'
return input
class DataMuxer(object):
"""
This class provides a wrapper layer of signals and slots
around a pandas DataFrame to make plugging stuff in for live
view easier.
    The data collection/event model being used is that all measurements
    (that is, values that come off the hardware) are time stamped
    to ring time.
The language being used through out is that of pandas data frames.
The data model is that of a sparse table keyed on time stamps which
is 'densified' on demand by propagating measurements forwards. Not
all measurements (ex images) can be filled. This behavior is controlled
by the `col_info` tuple.
Parameters
----------
events : list
list of Events (any object with the expected attributes will do)
"""
class Planner(object):
def __init__(self, dm):
self.dm = dm
def determine_upsample(self, interpolation=None, use_cols=None):
"Resolve (and if necessary validate) upsampling rules."
if interpolation is None:
interpolation = dict()
if use_cols is None:
use_cols = self.dm.columns
rules = dict()
for name in use_cols:
col_info = self.dm.col_info[name]
rule = _validate_upsample(
interpolation.get(name, col_info.upsample))
rule = _normalize_string_none(rule)
if (rule is not None) and (col_info.ndim > 0):
raise NotImplementedError(
"Only scalar data can be upsampled. "
"The {0}-dimensional source {1} was given the "
"upsampling rule {2}.".format(
col_info.ndim, name, rule))
rules[name] = rule
return rules
def determine_downsample(self, agg=None, use_cols=None):
"Resolve (and if necessary validate) sampling rules."
if agg is None:
agg = dict()
if use_cols is None:
use_cols = self.dm.columns
rules = dict()
for name in use_cols:
col_info = self.dm.col_info[name]
rule = _validate_downsample(agg.get(name, col_info.downsample))
rule = _normalize_string_none(rule)
rules[name] = rule
return rules
def bin_by_edges(self, bin_edges, bin_anchors, interpolation=None,
agg=None, use_cols=None):
"""Explain operation of DataMuxer.bin_by_edges
Parameters
----------
bin_edges : list
list of two-element items like [(t1, t2), (t3, t4), ...]
bin_anchors : list
These are time points where interpolated values will be
evaluated. Bin centers are usually a good choice.
interpolation : dict, optional
Override the default interpolation (upsampling) behavior of any
data source by passing a dictionary of source names mapped onto
one of the following interpolation methods.
{None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic', 'ffill', 'bfill'}
None means that each time bin must have at least one value.
See scipy.interpolator for more on the other methods.
agg : dict, optional
Override the default reduction (downsampling) behavior of any
data source by passing a dictionary of source names mapped onto
any callable that reduces multiple data points (of whatever
dimension) to a single data point.
use_cols : list, optional
List of columns to include in binning; use all columns by
default.
Returns
-------
df : pandas.DataFrame
table giving upsample and downsample rules for each data column
and indicating whether those rules are applicable
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
bin_anchors, binning = self.dm._bin_by_edges(bin_anchors, bin_edges)
# TODO Cache the grouping for reuse by resample.
grouped = self.dm._dataframe.groupby(binning)
counts = grouped.count()
df = pd.DataFrame.from_dict(_is_resampling_applicable(counts))
df['upsample'] = self.determine_upsample(interpolation, use_cols)
df['downsample'] = self.determine_downsample(agg, use_cols)
return df
def bin_on(self, source_name, interpolation=None, agg=None,
use_cols=None):
"""Explain operation of DataMuxer.bin_on.
Parameters
----------
source_name : string
interpolation : dict, optional
Override the default interpolation (upsampling) behavior of any
data source by passing a dictionary of source names mapped onto
one of the following interpolation methods.
{None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic'}
None means that each time bin must have at least one value.
See scipy.interpolator for more on the other methods.
agg : dict, optional
Override the default reduction (downsampling) behavior of any
data source by passing a dictionary of source names mapped onto
any callable that reduces multiple data points (of whatever
dimension) to a single data point.
use_cols : list, optional
List of columns to include in binning; use all columns by
default.
Returns
-------
df : pandas.DataFrame
table giving upsample and downsample rules for each data column
and indicating whether those rules are applicable
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
centers, bin_edges = self.dm._bin_on(source_name)
bin_anchors, binning = self.dm._bin_by_edges(centers, bin_edges)
# TODO Cache the grouping for reuse by resample.
grouped = self.dm._dataframe.groupby(binning)
counts = grouped.count()
df = pd.DataFrame.from_dict(_is_resampling_applicable(counts))
df['upsample'] = self.determine_upsample(interpolation, use_cols)
df['downsample'] = self.determine_downsample(agg, use_cols)
return df
default_upsample = None
default_downsample = None
def __init__(self):
self.sources = {}
self.col_info = {}
self.col_info['time'] = ColSpec('time', 0, [], 'linear', 'mean')
self._data = deque()
self._time = deque()
self._timestamps = deque()
self._timestamps_as_data = set()
self._known_events = set()
self._known_descriptors = set()
self._stale = True
self.plan = self.Planner(self)
self.convert_times = True
self._reference_time = None
@property
def reference_time(self):
return self._reference_time
@reference_time.setter
def reference_time(self, val):
self._reference_time = pd.Timestamp(val, unit='s')
@property
def columns(self):
"The columns of DataFrames returned by methods that return DataFrames."
return set(self.sources) | self._time_columns
@property
def _time_columns(self):
ts_names = [name + '_timestamp' for name in self._timestamps_as_data]
return {'time'} | set(ts_names)
@classmethod
def from_events(cls, events, verbose=False):
"""
Create a DataMuxer from a list of Events.
Parameters
----------
events : list
list of Events (any objects with the expected attributes will do)
"""
instance = cls()
instance.append_events(events, verbose)
return instance
def append_events(self, events, verbose=False):
"""Add a list of events to the DataMuxer.
Parameters
----------
events : list
list of Events (any objects with the expected attributes will do)
"""
for idx, event in enumerate(events):
if verbose and idx % 25 == 0:
print('loading event %s' % idx),
self.append_event(event)
def append_event(self, event):
"""Add an event to the DataMuxer.
Parameters
----------
event : Event
Event Document or any object with the expected attributes
Returns
-------
is_new : bool
True if event was added, False is it has already been added
"""
if event.uid in self._known_events:
return False
self._known_events.add(event.uid)
self._stale = True
if event.descriptor.uid not in self._known_descriptors:
self._process_new_descriptor(event.descriptor)
# Both scalar and nonscalar data will get stored in the DataFrame.
# This may be optimized later, but it might not actually help much.
self._data.append(
{name: data for name, data in six.iteritems(event.data)})
self._timestamps.append(
{name: ts for name, ts in six.iteritems(event.timestamps)})
self._time.append(event.time)
return True
def _process_new_descriptor(self, descriptor):
"Build a ColSpec and update state."
for name, description in six.iteritems(descriptor.data_keys):
# If we already have this source name, the unique source
# identifiers must match. Ambiguous names are not allowed.
if name in self.sources:
if self.sources[name] != description['source']:
raise ValueError("In a previously loaded descriptor, "
"'{0}' refers to {1} but in Event "
"Descriptor {2} it refers to {3}.".format(
name, self.sources[name],
descriptor.uid,
description['source']))
if name == 'time':
# We can argue later about how best to handle this corner
# case, but anything is better than silently mislabeling
# data.
raise ValueError("The name 'time' is reserved and cannot "
"be used as an alias.")
# If it is a new name, determine a ColSpec.
else:
self.sources[name] = description['source']
if 'external' in description and 'shape' in description:
shape = description['shape']
ndim = len(shape)
else:
# External data can be scalar. Nonscalar data must
# have a specified shape. Thus, if no shape is given,
# assume scalar.
shape = None
ndim = 0
upsample = self.default_upsample
if ndim > 0:
upsample = None
col_info = ColSpec(name, ndim, shape, upsample,
self.default_downsample) # defaults
# TODO Look up source-specific default in a config file
# or some other source of reference data.
self.col_info[name] = col_info
self._known_descriptors.add(descriptor.uid)
@property
def _dataframe(self):
"See also to_sparse_dataframe, the public version of this."
# Rebuild the DataFrame if more data has been added.
if self._stale:
df = pd.DataFrame(list(self._data))
df['time'] = list(self._time)
if self._timestamps_as_data:
# Only build this if we need it.
# TODO: We shouldn't have to build
# the whole thing, but there is already a lot of trickiness
# here so we'll worry about optimization later.
timestamps = pd.DataFrame(list(self._timestamps))
for source_name in self._timestamps_as_data:
col_name = _timestamp_col_name(source_name)
df[col_name] = timestamps[source_name]
logger.debug("Including %s timestamps as data", source_name)
self._df = df.sort('time').reset_index(drop=True)
self._stale = False
return self._df
def to_sparse_dataframe(self, include_all_timestamps=False):
"""Obtain all measurements in a DataFrame, one row per Event time.
Parameters
----------
include_all_timestamps : bool
The result will always contain a 'time' column but, by default,
not timestamps for individual data sources like 'motor_timestamp'.
Set this to True to export timestamp columns for each data column
Returns
-------
df : pandas.DataFrame
"""
if include_all_timestamps:
raise NotImplementedError("TODO")
result = self._dataframe.copy()
for col_name in self._time_columns:
result[col_name] = self._maybe_convert_times(result[col_name])
return result
def _maybe_convert_times(self, data):
if self.convert_times:
if self.reference_time is None:
return pd.to_datetime(data, unit='s')
return pd.to_datetime(data, unit='s') - self.reference_time
return data # no-op
def include_timestamp_data(self, source_name):
"""Add the exact timing of a data source as a data column.
Parameters
----------
source_name : string
one of the source names in DataMuxer.sources
"""
        # self._timestamps_as_data is a set of sources whose timestamps
# should be treated as data in the _dataframe method above.
self._timestamps_as_data.add(source_name)
name = _timestamp_col_name(source_name)
self.col_info[name] = ColSpec(name, 0, None, None, np.mean)
self._stale = True
def remove_timestamp_data(self, source_name):
"""Remove the exact timing of a data source from the data columns.
Parameters
----------
source_name : string
one of the source names in DataMuxer.sources
"""
self._timestamps_as_data.remove(source_name)
        # Do not force a rebuild (i.e., self._stale). Just remove it here.
del self._df[_timestamp_col_name(source_name)]
def bin_on(self, source_name, interpolation=None, agg=None, use_cols=None):
"""
Return data resampled to align with the data from a particular source.
Parameters
----------
source_name : string
interpolation : dict, optional
Override the default interpolation (upsampling) behavior of any
data source by passing a dictionary of source names mapped onto
one of the following interpolation methods.
{None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic'}
None means that each time bin must have at least one value.
See scipy.interpolator for more on the other methods.
agg : dict, optional
Override the default reduction (downsampling) behavior of any data
source by passing a dictionary of source names mapped onto any
callable that reduces multiple data points (of whatever dimension)
to a single data point.
use_cols : list, optional
List of columns to include in binning; use all columns by default.
Returns
-------
resampled_df : pandas.DataFrame
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
centers, bin_edges = self._bin_on(source_name)
return self.bin_by_edges(bin_edges, bin_anchors=centers,
interpolation=interpolation, agg=agg,
use_cols=use_cols)
def _bin_on(self, source_name):
"Compute bin edges spaced around centers defined by source_name points."
col = self._dataframe[source_name]
centers = self._dataframe['time'].reindex_like(col.dropna()).values
# [2, 4, 6] -> [-inf, 3, 5, inf]
bin_edges = np.mean([centers[1:], centers[:-1]], 0)
# [-inf, 3, 5, inf] -> [(-inf, 3), (3, 5), (5, inf)]
bin_edges = [-np.inf] + list(np.repeat(bin_edges, 2)) + [np.inf]
bin_edges = np.reshape(bin_edges, (-1, 2))
return centers, bin_edges
def bin_by_edges(self, bin_edges, bin_anchors, interpolation=None, agg=None,
use_cols=None):
"""
Return data resampled into bins with the specified edges.
Parameters
----------
bin_edges : list
list of two-element items like [(t1, t2), (t3, t4), ...]
bin_anchors : list
These are time points where interpolated values will be evaluated.
Bin centers are usually a good choice.
interpolation : dict, optional
Override the default interpolation (upsampling) behavior of any
data source by passing a dictionary of source names mapped onto
one of the following interpolation methods.
{None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic'}
None means that each time bin must have at least one value.
See scipy.interpolator for more on the other methods.
agg : dict, optional
Override the default reduction (downsampling) behavior of any data
source by passing a dictionary of source names mapped onto any
callable that reduces multiple data points (of whatever dimension)
to a single data point.
use_cols : list, optional
List of columns to include in binning; use all columns by default.
Returns
-------
resampled_df : pandas.DataFrame
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
bin_anchors, binning = self._bin_by_edges(bin_anchors, bin_edges)
return self.resample(bin_anchors, binning, interpolation, agg,
use_cols=use_cols)
def _bin_by_edges(self, bin_anchors, bin_edges):
"Compute bin assignment and, if needed, bin_anchors."
time = self._dataframe['time'].values
# Get edges into 1D array[L, R, L, R, ...]
edges_as_pairs = np.reshape(bin_edges, (-1, 2))
all_edges = np.ravel(edges_as_pairs)
if not np.all(np.diff(all_edges) >= 0):
raise ValueError("Illegal binning: the left edge must be less "
"than the right edge.")
        # Sort out where in the array each time would be inserted.
binning = np.searchsorted(all_edges, time).astype(float)
# Times that would get inserted at even positions are between bins.
# Mark them
binning[binning % 2 == 0] = np.nan
binning //= 2 # Make bin number sequential, not odds only.
if bin_anchors is None:
bin_anchors = np.mean(edges_as_pairs, axis=1) # bin centers
else:
if len(bin_anchors) != len(bin_edges):
raise ValueError("There are {0} bin_anchors but {1} pairs of "
"bin_edges. These must match.".format(
len(bin_anchors), len(bin_edges)))
return bin_anchors, binning
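    # Worked example of the binning trick above (hypothetical numbers): with
    # bin_edges [(0, 1), (2, 3)] the flattened edges are [0, 1, 2, 3]; times
    # [0.5, 1.5, 2.5] searchsort to positions [1, 2, 3], the even position
    # (1.5 falls between bins) becomes NaN, and floor division by 2 yields
    # bin assignments [0, nan, 1].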
def resample(self, bin_anchors, binning, interpolation=None, agg=None,
verify_integrity=True, use_cols=None):
"""
Return data resampled into bins with the specified edges.
Parameters
----------
bin_anchors : list
These are time points where interpolated values will be evaluated.
Bin centers are usually a good choice.
        binning : list
Bin assignment. Example: [1, 1, 2, 2, 3, 3] puts six data points
into three bins with two points each.
interpolation : dict, optional
Override the default interpolation (upsampling) behavior of any
data source by passing a dictionary of source names mapped onto
one of the following interpolation methods.
{None, 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic'}
None means that each time bin must have at least one value.
See scipy.interpolator for more on the other methods.
agg : dict, optional
Override the default reduction (downsampling) behavior of any data
source by passing a dictionary of source names mapped onto any
callable that reduces multiple data points (of whatever dimension)
to a single data point.
verify_integrity : bool, optional
For a cost in performance, verify that the downsampling function
produces data of the expected shape. True by default.
use_cols : list, optional
List of columns to include in binning; use all columns by default.
Returns
-------
resampled_df : pandas.DataFrame
References
----------
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
"""
if use_cols is None:
use_cols = self.columns
plan = self.Planner(self)
upsampling_rules = plan.determine_upsample(interpolation, use_cols)
downsampling_rules = plan.determine_downsample(agg, use_cols)
grouped = self._dataframe.groupby(binning)
first_point = grouped.first()
counts = grouped.count()
resampling_requirements = _is_resampling_applicable(counts)
index = np.arange(len(bin_anchors))
result = {} # dict of DataFrames, to become one MultiIndexed DataFrame
for name in use_cols:
upsample = upsampling_rules[name]
downsample = downsampling_rules[name]
upsampling_possible = resampling_requirements['upsampling_possible'][name]
downsampling_needed = resampling_requirements['downsampling_needed'][name]
result[name] = pd.DataFrame(index=index)
# Put the first (maybe only) value into a Series.
# We will overwrite as needed below.
result[name]['val'] = pd.Series(data=first_point[name])
# Short-circuit if we are done.
if not (upsampling_possible or downsampling_needed):
logger.debug("%s has exactly one data point per bin", name)
continue
result[name]['count'] = counts[name]
# If any bin has no data, use the upsampling rule to interpolate
# at the center of the empty bins. If there is no rule, simply
# leave some bins empty. Do not raise an error.
if upsampling_possible and (upsample is not None):
if upsample in ('ffill', 'bfill'):
result[name]['val'].fillna(method=upsample, inplace=True)
else:
dense_col = self._dataframe[name].dropna()
y = dense_col.values
x = self._dataframe['time'].reindex_like(dense_col).values
interpolator = interp1d(x, y, kind=upsample)
# Outside the limits of the data, the interpolator will
                    # fail. Leave any such entries empty.
is_safe = ((bin_anchors > np.min(x)) &
(bin_anchors < np.max(x)))
safe_times = bin_anchors[is_safe]
safe_bins = index[is_safe]
interp_points = pd.Series(interpolator(safe_times),
index=safe_bins)
logger.debug("Interpolating to fill %d of %d "
"empty bins in %s",
len(safe_bins), (counts[name] == 0).sum(),
name)
result[name]['val'].fillna(interp_points, inplace=True)
# Short-circuit if we are done.
if not downsampling_needed:
logger.debug("%s has at most one data point per bin", name)
continue
# Multi-valued bins must be downsampled (reduced). If there is no
# rule for downsampling, we have no recourse: we must raise.
if (downsample is None):
raise BinningError("The specified binning puts multiple "
"'{0}' measurements in at least one bin, "
"and there is no rule for downsampling "
"(i.e., reducing) it.".format(name))
if verify_integrity and callable(downsample):
downsample = _build_verified_downsample(
downsample, self.col_info[name].shape)
g = grouped[name] # for brevity
if self.col_info[name].ndim == 0:
logger.debug("The scalar column %s must be downsampled.", name)
# For scalars, pandas knows what to do.
downsampled = g.agg(downsample)
std_series = g.std()
max_series = g.max()
min_series = g.min()
else:
                # For nonscalars, we are abusing groupby and must go to
# a little more trouble to guarantee success.
logger.debug("The nonscalar column %s must be downsampled.",
name)
if not callable(downsample):
# Do this lookup here so that strings can be passed
# in the call to resample.
downsample = ColSpec._downsample_mapping[downsample]
downsampled = g.apply(lambda x: downsample(np.asarray(x.dropna())))
std_series = g.apply(lambda x: np.std(np.asarray(x.dropna()), 0))
max_series = g.apply(lambda x: np.max(np.asarray(x.dropna()), 0))
min_series = g.apply(lambda x: np.min(np.asarray(x.dropna()), 0))
# This (counts[name] > 1) is redundant, but there is no clean way to
# pass it here without refactoring. Not a huge cost.
result[name]['val'].where(~(counts[name] > 1), downsampled, inplace=True)
result[name]['std'] = std_series
result[name]['max'] = max_series
result[name]['min'] = min_series
result = pd.concat(result, axis=1) # one MultiIndexed DataFrame
result.index.name = 'bin'
# Convert time timestamp or timedelta, depending on the state of
# self.convert_times and self.reference_time.
for col_name in self._time_columns:
if isinstance(result[col_name], pd.DataFrame):
subcols = result[col_name].columns
for subcol in subcols & {'max', 'min', 'val'}:
result[(col_name, subcol)] = self._maybe_convert_times(
result[(col_name, subcol)])
for subcol in subcols & {'std'}:
result[(col_name, subcol)] = pd.to_timedelta(
result[(col_name, subcol)], unit='s')
else:
result[col_name] = self._maybe_convert_times(
result[col_name])
return result
def __getitem__(self, source_name):
if source_name not in list(self.col_info.keys()) + ['time']:
raise KeyError("No data from a source called '{0}' has been "
"added.".format(source_name))
# Unlike output from binning functions, this is indexed
# on time.
result = self._dataframe[source_name].dropna()
result.index = self._dataframe['time'].reindex_like(result)
return result
def __getattr__(self, attr):
# Developer beware: if any properties raise an AttributeError,
# this will mask it. Comment this magic method to debug properties.
if attr in self.col_info.keys():
return self[attr]
else:
raise AttributeError("DataMuxer has no attribute {0} and no "
"data source named '{0}'".format(attr))
@property
def ncols(self):
"""
The number of columns that the DataMuxer contains
"""
return len(self.col_info)
@property
def col_info_by_ndim(self):
"""Dictionary mapping dimensionality (ndim) onto a list of ColSpecs"""
result = {}
for name, col_spec in six.iteritems(self.col_info):
try:
result[col_spec.ndim]
except KeyError:
result[col_spec.ndim] = []
result[col_spec.ndim].append(col_spec)
return result
def dataframe_to_dict(df):
"""
Turn a DataFrame into a dict of lists.
Parameters
----------
df : DataFrame
Returns
-------
index : ndarray
The index of the data frame
data : dict
Dictionary keyed on column name of the column. The value is
one of (ndarray, list, pd.Series)
"""
dict_of_lists = {col: df[col].to_list() for col in df.columns}
return df.index.values, dict_of_lists
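# A small illustration (assumed input): for df = pd.DataFrame({'a': [10, 20]}),
# dataframe_to_dict(df) would return (array([0, 1]), {'a': [10, 20]}).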
def _build_verified_downsample(downsample, expected_shape):
# Ensure two things:
# 1. The downsampling function shouldn't touch bins with only one point.
# 2. The result of downsample should have the right shape.
def _downsample(data):
if len(data) == 1:
return data
downsampled = downsample(data)
if (expected_shape is None or expected_shape == 0):
if not np.isscalar(downsampled):
raise BadDownsamplerError("The 'agg' (downsampling) function "
"for {0} is expected to produce "
"a scalar from the data in each "
"bin.".format(downsampled))
elif downsampled.shape != expected_shape:
raise BadDownsamplerError("An 'agg' (downsampling) function "
"returns data shaped {0} but the "
"shape {1} is expected.".format(
downsampled.shape, expected_shape))
return downsampled
return _downsample
def _timestamp_col_name(source_name):
return '{0}_timestamp'.format(source_name)
def _normalize_string_none(val):
"Replay passes 'None' to mean None."
try:
lowercase_val = val.lower()
except AttributeError:
return val
if lowercase_val == 'none':
return None
else:
return val
def _is_resampling_applicable(counts):
has_no_points = counts == 0
has_multiple_points = counts > 1
upsampling_possible = has_no_points.any()
downsampling_needed = has_multiple_points.any()
result = {}
result['upsampling_possible'] = upsampling_possible.to_dict()
result['downsampling_needed'] = downsampling_needed.to_dict()
return result
| bsd-3-clause |
DirkdeDraak/easybuild-easyblocks | easybuild/easyblocks/x/xmipp.py | 10 | 8904 | ##
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Xmipp, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
@author: Pablo Escobar (sciCORE, SIB, University of Basel)
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import stat
import sys
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_Xmipp(EasyBlock):
"""
easyblock to install Xmipp
"""
def __init__(self, *args, **kwargs):
"""Easyblock constructor, enable building in installation directory."""
super(EB_Xmipp, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.xmipp_pythonpaths = []
def extract_step(self):
"""Extract Xmipp sources."""
# strip off 'xmipp' part to avoid having everything in a 'xmipp' subdirectory
self.cfg.update('unpack_options', '--strip-components=1')
super(EB_Xmipp, self).extract_step()
def configure_step(self):
"""Set configure options."""
if self.toolchain.mpi_family() == toolchain.INTELMPI:
mpi_bindir = os.path.join(get_software_root('impi'), 'intel64', 'bin')
else:
mpi_bindir = os.path.join(get_software_root(self.toolchain.MPI_MODULE_NAME[0]), 'bin')
root_java = get_software_root("Java")
if not get_software_root("Java"):
raise EasyBuildError("Module for dependency Java not loaded.")
configure_args = ' '.join([
'profile=no fast=yes warn=no release=yes gtest=yes static=no cuda=no debug=no matlab=no',
'LINKERFORPROGRAMS=%s' % os.getenv('CXX'),
'MPI_BINDIR=%s' % mpi_bindir,
'MPI_LIB=mpi',
'JAVA_HOME=%s' % os.getenv('JAVA_HOME'),
'JAVAC=javac',
'CC=%s' % os.getenv('CC'),
# pass $CXXFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CXXFLAGS=%s' % str(os.getenv('CXXFLAGS').split(' ')).replace(' ', ''),
'CXX=%s' % os.getenv('CXX'),
'MPI_CC=%s' % os.getenv('MPICC'),
# pass $CFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CCFLAGS=%s' % str(os.getenv('CFLAGS').split(' ')).replace(' ', ''),
'MPI_CXX=%s' % os.getenv('MPICXX'),
'MPI_INCLUDE=%s' % os.getenv('MPI_INC_DIR'),
'MPI_LIBDIR=%s' % os.getenv('MPI_LIB_DIR'),
'MPI_LINKERFORPROGRAMS=%s' % os.getenv('MPICXX'),
'LIBPATH=%s' % os.getenv('LD_LIBRARY_PATH'),
])
# define list of configure options, which will be passed to Xmipp's install.sh script via --configure-args
self.cfg['configopts'] = configure_args
self.log.info("Configure arguments for Xmipp install.sh script: %s", self.cfg['configopts'])
def build_step(self):
"""No custom build step (see install step)."""
pass
def install_step(self):
"""Build/install Xmipp using provided install.sh script."""
pylibdir = det_pylibdir()
self.xmipp_pythonpaths = [
# location where Python packages will be installed by Xmipp installer
pylibdir,
'protocols',
os.path.join('libraries', 'bindings', 'python'),
]
python_root = get_software_root('Python')
if python_root:
# extend $PYTHONPATH
all_pythonpaths = [os.path.join(self.installdir, p) for p in self.xmipp_pythonpaths]
            # required so packages installed as extensions in Python dep are picked up
all_pythonpaths.append(os.path.join(python_root, pylibdir))
all_pythonpaths.append(os.environ.get('PYTHONPATH', ''))
env.setvar('PYTHONPATH', os.pathsep.join(all_pythonpaths))
# location where Python packages will be installed by Xmipp installer must exist already (setuptools)
mkdir(os.path.join(self.installdir, pylibdir), parents=True)
# put dummy xmipp_python script in place if Python is used as a dependency
bindir = os.path.join(self.installdir, 'bin')
mkdir(bindir)
xmipp_python = os.path.join(bindir, 'xmipp_python')
xmipp_python_script_body = '\n'.join([
'#!/bin/sh',
'%s/bin/python "$@"' % python_root,
])
write_file(xmipp_python, xmipp_python_script_body)
adjust_permissions(xmipp_python, stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
# make sure Python.h and numpy header are found
env.setvar('CPATH', os.pathsep.join([
os.path.join(python_root, 'include', 'python%s' % pyshortver),
os.path.join(python_root, pylibdir, 'numpy', 'core', 'include'),
os.environ.get('CPATH', ''),
]))
cmd_opts = []
# disable (re)building of supplied dependencies
dep_names = [dep['name'] for dep in self.cfg['dependencies']]
for dep in ['FFTW', 'HDF5', ('libjpeg-turbo', 'jpeg'), ('LibTIFF', 'tiff'), 'matplotlib', 'Python', 'SQLite',
'Tcl', 'Tk']:
if isinstance(dep, tuple):
dep, opt = dep
else:
opt = dep.lower()
# don't check via get_software_root, check listed dependencies directly (relevant for FFTW)
if dep in dep_names:
cmd_opts.append('--%s=false' % opt)
# Python should also provide numpy/mpi4py
if dep == 'Python':
cmd_opts.extend(['--numpy=false', '--mpi4py=false'])
if '--tcl=false' in cmd_opts and '--tk=false' in cmd_opts:
cmd_opts.append('--tcl-tk=false')
# patch install.sh script to inject configure options
# setting $CONFIGURE_ARGS or using --configure-args doesn't work...
for line in fileinput.input('install.sh', inplace=1, backup='.orig.eb'):
line = re.sub(r"^CONFIGURE_ARGS.*$", 'CONFIGURE_ARGS="%s"' % self.cfg['configopts'], line)
sys.stdout.write(line)
cmd = './install.sh -j %s --unattended=true %s' % (self.cfg['parallel'], ' '.join(cmd_opts))
out, _ = run_cmd(cmd, log_all=True, simple=False)
if not re.search("Xmipp has been successfully compiled", out):
raise EasyBuildError("Xmipp installation did not complete successfully?")
def sanity_check_step(self):
"""Custom sanity check for Xmipp."""
custom_paths = {
# incomplete list, random picks, cfr. http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/ListOfProgramsv3
'files': ['bin/xmipp_%s' % x for x in ['compile', 'imagej', 'mpi_run', 'phantom_create',
'transform_filter', 'tomo_project', 'volume_align']],
'dirs': ['lib'],
}
super(EB_Xmipp, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define Xmipp specific variables in generated module file, i.e. XMIPP_HOME."""
txt = super(EB_Xmipp, self).make_module_extra()
txt += self.module_generator.set_environment('XMIPP_HOME', self.installdir)
txt += self.module_generator.prepend_paths('PYTHONPATH', self.xmipp_pythonpaths)
return txt
| gpl-2.0 |
dmitriz/zipline | zipline/sources/simulated.py | 19 | 5264 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import six
import numpy as np
from datetime import timedelta
import pandas as pd
from zipline.sources.data_source import DataSource
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.gens.utils import hash_args
class RandomWalkSource(DataSource):
"""RandomWalkSource that emits events with prices that follow a
random walk. Will generate valid datetimes that match market hours
    of the supplied calendar and can emit events with
user-defined frequencies (e.g. minutely).
"""
VALID_FREQS = frozenset(('daily', 'minute'))
def __init__(self, start_prices=None, freq='minute', start=None,
end=None, drift=0.1, sd=0.1, calendar=calendar_nyse):
"""
:Arguments:
start_prices : dict
sid -> starting price.
Default: {0: 100, 1: 500}
freq : str <default='minute'>
Emits events according to freq.
Can be 'daily' or 'minute'
start : datetime <default=start of calendar>
Start dt to emit events.
end : datetime <default=end of calendar>
End dt until to which emit events.
drift: float <default=0.1>
Constant drift of the price series.
sd: float <default=0.1>
Standard deviation of the price series.
calendar : calendar object <default: NYSE>
Calendar to use.
See zipline.utils for different choices.
:Example:
# Assumes you have instantiated your Algorithm
# as myalgo.
myalgo = MyAlgo()
source = RandomWalkSource()
myalgo.run(source)
"""
# Hash_value for downstream sorting.
self.arg_string = hash_args(start_prices, freq, start, end,
calendar.__name__)
if freq not in self.VALID_FREQS:
raise ValueError('%s not in %s' % (freq, self.VALID_FREQS))
self.freq = freq
if start_prices is None:
self.start_prices = {0: 100,
1: 500}
else:
self.start_prices = start_prices
self.calendar = calendar
if start is None:
self.start = calendar.start
else:
self.start = start
if end is None:
self.end = calendar.end_base
else:
self.end = end
self.drift = drift
self.sd = sd
self.sids = self.start_prices.keys()
self.open_and_closes = \
calendar.open_and_closes[self.start:self.end]
self._raw_data = None
@property
def instance_hash(self):
return self.arg_string
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
'open_price': (float, 'open_price'),
'high': (float, 'high'),
'low': (float, 'low'),
}
def _gen_next_step(self, x):
x += np.random.randn() * self.sd + self.drift
return max(x, 0.1)
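    # _gen_next_step is one Gaussian random-walk increment: the new price is the
    # old price plus N(drift, sd) noise, floored at 0.1 to keep it positive.
    # For example (hypothetical draw), with drift=0.1, sd=0.1 and a standard
    # normal draw of -0.5 the increment is -0.5*0.1 + 0.1 = 0.05, so a price of
    # 100 moves to 100.05.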
def _gen_events(self, cur_prices, current_dt):
for sid, price in six.iteritems(cur_prices):
cur_prices[sid] = self._gen_next_step(cur_prices[sid])
event = {
'dt': current_dt,
'sid': sid,
'price': cur_prices[sid],
'volume': np.random.randint(1e5, 1e6),
'open_price': cur_prices[sid],
'high': cur_prices[sid] + .1,
'low': cur_prices[sid] - .1,
}
yield event
def raw_data_gen(self):
cur_prices = copy(self.start_prices)
for _, (open_dt, close_dt) in self.open_and_closes.iterrows():
current_dt = copy(open_dt)
if self.freq == 'minute':
# Emit minutely trade signals from open to close
while current_dt <= close_dt:
for event in self._gen_events(cur_prices, current_dt):
yield event
current_dt += timedelta(minutes=1)
elif self.freq == 'daily':
# Emit one signal per day at close
for event in self._gen_events(
cur_prices, pd.tslib.normalize_date(close_dt)):
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
nooperpudd/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
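        # The loop above is a crude client-side throttle: it blocks while more
        # than 59 historical requests were made in the last 600 s, which appears
        # intended to stay under IB's historical-data pacing limit (roughly 60
        # requests per 10 minutes at the time this was written).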
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
michigraber/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
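# For contrast, a make_scorer call that is expected to succeed (illustrative
# only; the object built here is not used by the tests below):
#     my_scorer = make_scorer(fbeta_score, beta=2)
# needs_proba and needs_threshold are mutually exclusive, which is exactly what
# test_make_scorer asserts above.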
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
treycausey/scikit-learn | examples/cluster/plot_lena_segmentation.py | 8 | 2421 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
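# a small eps keeps the edge weights strictly positive (the exponential can
# underflow for large gradients), which keeps the graph numerically well behaved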
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
for l in range(N_REGIONS):
pl.contour(labels == l, contours=1,
colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
pl.xticks(())
pl.yticks(())
pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
| bsd-3-clause |
brunston/whirlsf | app/app.py | 1 | 4650 | # whirl dash app
# brunston poon
import dash
import dash_core_components as dcc
import dash_html_components as dhtml
import pandas as pd
import os.path
import plotly.graph_objs as go
import whirl as wh
app = dash.Dash()
filenames = {
"park_old": "Park_Scores_2005-2014.csv",
"park_new": "Park_Evaluation_Scores_starting_Fiscal_Year_2015.csv",
"library": "Library_Usage.csv"
}
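# the CSV files above are expected in a data/ directory one level above the
# current working directory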
data_dir = os.path.dirname(os.getcwd()) + "/data/"
sf_dist_map = data_dir +\
"sf_supervisor_district_maps/sf_district_map_sfyimby.png"
whirl = wh.Whirl(data_dir, filenames)
# let's use a functional CSS dependency from the Dash tutorials; no need to
# spend an inordinate amount of time to develop my own at the moment.
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
colors = {
'background': '#EEEEEE',
'text': '#555555'
}
app.layout = dhtml.Div(children=[
dhtml.H1(
children='WhirlSF - a whirlwind tour of SF park and library data',
style={
'textAlign': 'center',
'color': colors['text']
}
),
dcc.Markdown(
children="## Perception\n##### As people who work with data, sometimes \
we forget to see the greater picture of our work; the folks who end up \
reading our analyses do not always think the way that we do. \
As a result, often we forget that the easiest way to show someone \
something is to tell them a story..."
),
dcc.Graph(
id="supervisor_districts_park_scores",
figure = {
'data': [
go.Bar(
x = [
"SD 1", "SD 2", "SD 3", "SD 4", "SD 5", "SD 6", "SD 7",
"SD 8", "SD 9", "SD 10", "SD 11"],
y = wh.mean(whirl.parkgrp_df)["Score"].values.tolist(),
text = wh.mean(whirl.parkgrp_df)["Score"].values.tolist()
)
],
'layout': {
'title': "Average Park Scores by Supervisor District",
'yaxis': {
'range': [0.85, 0.96]
}
}
}
),
dcc.Markdown(
children="## Objective\n The objective of this exploration is to \
explore how geography plays roles across economics, education, and \
usage of libraries across supervisor districts in San \
Francisco."
),
dhtml.Img(src="https://i.imgur.com/6mROVMa.png", title="SF Supervisor \
District map, sourced from sfyimby.org"),
dcc.Markdown(
children="We can explore first the general income levels of each of \
the supervisor districts in San Francisco, using both federal Census data \
and city-sourced data, along with district-level education information to help \
us understand the geography of both financial and educational \
capital in San Francisco. \
(Source: Phase 1 Socioeconomic Equity in the \
City of San Francisco Policy Analysis Report, SF Board of Supervisors)"
),
dcc.Graph(
id="income_and_education_percent_district_level",
figure = {
'data': [
{
'x': whirl.sd_capital.index,
'y': whirl.sd_capital["fin"],
'type': 'bar',
'name': 'Average Household Income',
'yaxis': 'y1'
},
{
'x': whirl.sd_capital.index,
'y': whirl.sd_capital["edu"],
'type': 'bar',
'name': '% with college-level education',
'yaxis': 'y2'
}
],
'layout': {
'yaxis': {
'title': "t1",
'range': [30000,110000]
},
'yaxis2': {
'title': 't2',
'range': [0,100],
'overlaying': 'y',
'side': 'right'
}
}
}
),
dcc.Markdown(
children="How might we use this information in conjunction with \
knowledge about how libraries are used by district?"
),
dcc.Graph(
id="libraries_by_district",
figure = {
'data':
go.Bar(
x = whirl.libmean_df.index,
y = whirl.libmean_df["Total Checkouts"]
),
'layout': {
'title': "Average total checkouts by patron per district, 2003-2016"
}
}
)
]
)
if __name__ == '__main__':
app.run_server(debug=True)
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts200/p200.py | 1 | 12445 | """SPC Convective Outlook Heatmaps."""
import datetime
from collections import OrderedDict
import cartopy.crs as ccrs
import numpy as np
from rasterstats import zonal_stats
from geopandas import read_postgis
from affine import Affine
import pandas as pd
from pyiem.plot.geoplot import MapPlot
from pyiem.plot import get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT5 = {
"yes": "YES: Draw Counties/Parishes",
"no": "NO: Do Not Draw Counties/Parishes",
}
ISSUANCE = OrderedDict(
(
("1.C.1", "Day 1 Convective @1z"),
("1.C.5", "Day 1 Convective @5z"),
("1.F.6", "Day 1 Fire Weather @6z"),
("1.C.12", "Day 1 Convective @12z"),
("1.C.16", "Day 1 Convective @16z"),
("1.F.16", "Day 1 Fire Weather @16z"),
("1.C.20", "Day 1 Convective @20z"),
("2.C.6", "Day 2 Convective @6z"),
("2.F.7", "Day 2 Fire Weather @7z"),
("2.C.17", "Day 2 Convective @17z"),
("2.F.19", "Day 2 Fire Weather @19z"),
("3.C.7", "Day 3 Convective @7z"),
("3.F.21", "Day 3 Fire Weather @21z"),
("4.C.8", "Day 4 Convective @8z"),
("5.C.8", "Day 5 Convective @8z"),
("6.C.8", "Day 6 Convective @8z"),
("7.C.8", "Day 7 Convective @8z"),
("8.C.8", "Day 8 Convective @8z"),
)
)
OUTLOOKS = OrderedDict(
(
("ANY SEVERE.0.02", "Any Severe 2% (Day 3+)"),
("ANY SEVERE.0.05", "Any Severe 5% (Day 3+)"),
("ANY SEVERE.0.15", "Any Severe 15% (Day 3+)"),
("ANY SEVERE.0.25", "Any Severe 25% (Day 3+)"),
("ANY SEVERE.0.30", "Any Severe 30% (Day 3+)"),
("ANY SEVERE.0.35", "Any Severe 35% (Day 3+)"),
("ANY SEVERE.0.45", "Any Severe 45% (Day 3+)"),
("ANY SEVERE.0.60", "Any Severe 60% (Day 3+)"),
("ANY SEVERE.SIGN", "Any Severe Significant (Day 3+)"),
("CATEGORICAL.TSTM", "Categorical Thunderstorm Risk (Days 1-3)"),
("CATEGORICAL.MRGL", "Categorical Marginal Risk (2015+) (Days 1-3)"),
("CATEGORICAL.SLGT", "Categorical Slight Risk (Days 1-3)"),
("CATEGORICAL.ENH", "Categorical Enhanced Risk (2015+) (Days 1-3)"),
("CATEGORICAL.MDT", "Categorical Moderate Risk (Days 1-3)"),
("CATEGORICAL.HIGH", "Categorical High Risk (Days 1-3)"),
(
"FIRE WEATHER CATEGORICAL.CRIT",
"Categorical Critical Fire Wx (Days 1-2)",
),
(
"FIRE WEATHER CATEGORICAL.EXTM",
"Categorical Extreme Fire Wx (Days 1-2)",
),
(
"CRITICAL FIRE WEATHER AREA.0.15",
"Critical Fire Weather Area 15% (Days3-7)",
),
("HAIL.0.05", "Hail 5% (Days 1+2)"),
("HAIL.0.15", "Hail 15% (Days 1+2)"),
("HAIL.0.25", "Hail 25% (Days 1+2)"),
("HAIL.0.30", "Hail 30% (Days 1+2)"),
("HAIL.0.35", "Hail 35% (Days 1+2)"),
("HAIL.0.45", "Hail 45% (Days 1+2)"),
("HAIL.0.60", "Hail 60% (Days 1+2)"),
("HAIL.SIGN", "Hail Significant (Days 1+2)"),
("TORNADO.0.02", "Tornado 2% (Days 1+2)"),
("TORNADO.0.05", "Tornado 5% (Days 1+2)"),
("TORNADO.0.10", "Tornado 10% (Days 1+2)"),
("TORNADO.0.15", "Tornado 15% (Days 1+2)"),
("TORNADO.0.25", "Tornado 25% (Days 1+2)"),
("TORNADO.0.30", "Tornado 30% (Days 1+2)"),
("TORNADO.0.35", "Tornado 35% (Days 1+2)"),
("TORNADO.0.45", "Tornado 45% (Days 1+2)"),
("TORNADO.0.60", "Tornado 60% (Days 1+2)"),
("TORNADO.SIGN", "Tornado Significant (Days 1+2)"),
("WIND.0.05", "Wind 5% (Days 1+2)"),
("WIND.0.15", "Wind 15% (Days 1+2)"),
("WIND.0.25", "Wind 25% (Days 1+2)"),
("WIND.0.30", "Wind 30% (Days 1+2)"),
("WIND.0.35", "Wind 35% (Days 1+2)"),
("WIND.0.45", "Wind 45% (Days 1+2)"),
("WIND.0.60", "Wind 60% (Days 1+2)"),
("WIND.SIGN", "Wind Significant (Days 1+2)"),
)
)
PDICT = {"cwa": "Plot by NWS Forecast Office", "state": "Plot by State/Sector"}
MDICT = OrderedDict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
griddelta = 0.05
GRIDWEST = -139.2
GRIDEAST = -55.1
GRIDNORTH = 54.51
GRIDSOUTH = 19.47
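# affine transform mapping (col, row) grid indices to (lon, lat), anchored at the
# northwest corner of the grid; YSZ and XSZ are the grid dimensions in cells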
PRECIP_AFF = Affine(griddelta, 0.0, GRIDWEST, 0.0, griddelta * -1, GRIDNORTH)
YSZ = (GRIDNORTH - GRIDSOUTH) / griddelta
XSZ = (GRIDEAST - GRIDWEST) / griddelta
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """
This application generates heatmaps of Storm Prediction Center
convective outlooks.
<p><strong>Major Caveat</strong>: Due to how the IEM stores the outlook
geometries, the values presented here are for an outlook level and levels
higher. For example, if a location was in a moderate risk and you asked
this app to total slight risks, the moderate risk would count toward the
slight risk total.</p>
<p><i class="fa fa-info"></i> This autoplot likely has some rough
edges yet, so please let us know of problems encountered.</p>
"""
desc["arguments"] = [
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="select",
name="p",
default="1.C.16",
options=ISSUANCE,
label="Select SPC Product Issuance",
),
dict(
type="select",
name="level",
default="CATEGORICAL.SLGT",
options=OUTLOOKS,
label="Select outlook level:",
),
dict(
type="select",
name="t",
default="state",
options=PDICT,
label="Select plot extent type:",
),
dict(
type="networkselect",
name="station",
network="WFO",
default="DMX",
label="Select WFO: (ignored if plotting state)",
),
dict(
type="csector",
name="csector",
default="conus",
label="Select state/sector to plot",
),
dict(
type="select",
name="drawc",
default="no",
options=PDICT5,
label="Plot County/Parish borders on maps?",
),
dict(type="cmap", name="cmap", default="jet", label="Color Ramp:"),
]
return desc
def plotter(fdict):
""" Go """
ctx = get_autoplot_context(fdict, get_description())
level = ctx["level"]
station = ctx["station"][:4]
t = ctx["t"]
p = ctx["p"]
month = ctx["month"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
ones = np.ones((int(YSZ), int(XSZ)))
counts = np.zeros((int(YSZ), int(XSZ)))
# counts = np.load('counts.npy')
lons = np.arange(GRIDWEST, GRIDEAST, griddelta)
lats = np.arange(GRIDSOUTH, GRIDNORTH, griddelta)
pgconn = get_dbconn("postgis")
hour = int(p.split(".")[2])
df = read_postgis(
"""
WITH data as (
select product_issue, issue, expire, geom,
rank() OVER (PARTITION by issue ORDER by product_issue DESC)
from spc_outlook o JOIN spc_outlook_geometries g on
(o.id = g.spc_outlook_id) where
outlook_type = %s and day = %s and threshold = %s and
category = %s and
ST_Within(geom, ST_GeomFromEWKT('SRID=4326;POLYGON((%s %s, %s %s,
%s %s, %s %s, %s %s))'))
and extract(hour from product_issue at time zone 'UTC') in %s
and extract(month from product_issue) in %s
)
SELECT * from data where rank = 1
""",
pgconn,
params=(
p.split(".")[1],
p.split(".")[0],
level.split(".", 1)[1],
level.split(".")[0],
GRIDWEST,
GRIDSOUTH,
GRIDWEST,
GRIDNORTH,
GRIDEAST,
GRIDNORTH,
GRIDEAST,
GRIDSOUTH,
GRIDWEST,
GRIDSOUTH,
tuple([hour - 1, hour, hour + 1]),
tuple(months),
),
geom_col="geom",
)
if df.empty:
raise NoDataFound("No results found for query")
for _, row in df.iterrows():
zs = zonal_stats(
row["geom"],
ones,
affine=PRECIP_AFF,
nodata=-1,
all_touched=True,
raster_out=True,
)
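        # each zonal_stats result carries a mini raster clipped to the outlook
        # polygon plus its own affine transform; translate its west/north corner
        # back to indices of the full grid and add one count per covered cell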
for z in zs:
aff = z["mini_raster_affine"]
west = aff.c
north = aff.f
raster = np.flipud(z["mini_raster_array"])
x0 = int((west - GRIDWEST) / griddelta)
y1 = int((north - GRIDSOUTH) / griddelta)
dy, dx = np.shape(raster)
x1 = x0 + dx
y0 = y1 - dy
counts[y0:y1, x0:x1] += np.where(raster.mask, 0, 1)
mindate = datetime.datetime(2014, 10, 1)
if level not in ["CATEGORICAL.MRGL", "CATEGORICAL.ENH"]:
mindate = datetime.datetime(2002, 1, 1)
if p.split(".")[1] == "F":
mindate = datetime.datetime(2017, 1, 1)
years = (
(datetime.datetime.now() - mindate).total_seconds() / 365.25 / 86400.0
)
data = counts / years
subtitle = "Found %s events for CONUS between %s and %s" % (
len(df.index),
df["issue"].min().strftime("%d %b %Y"),
df["issue"].max().strftime("%d %b %Y"),
)
if t == "cwa":
sector = "cwa"
subtitle = "Plotted for %s (%s). %s" % (
ctx["_nt"].sts[station]["name"],
station,
subtitle,
)
else:
sector = "state" if len(ctx["csector"]) == 2 else ctx["csector"]
mp = MapPlot(
sector=sector,
state=ctx["csector"],
cwa=(station if len(station) == 3 else station[1:]),
axisbg="white",
title="SPC %s Outlook [%s] of at least %s"
% (
ISSUANCE[p],
month.capitalize(),
OUTLOOKS[level].split("(")[0].strip(),
),
subtitle=subtitle,
nocaption=True,
twitter=True,
)
# Get the main axes bounds
if t == "state" and ctx["csector"] == "conus":
domain = data
lons, lats = np.meshgrid(lons, lats)
df2 = pd.DataFrame()
else:
(west, east, south, north) = mp.ax.get_extent(ccrs.PlateCarree())
i0 = int((west - GRIDWEST) / griddelta)
j0 = int((south - GRIDSOUTH) / griddelta)
i1 = int((east - GRIDWEST) / griddelta)
j1 = int((north - GRIDSOUTH) / griddelta)
jslice = slice(j0, j1)
islice = slice(i0, i1)
domain = data[jslice, islice]
lons, lats = np.meshgrid(lons[islice], lats[jslice])
df2 = pd.DataFrame(
{"lat": lats.ravel(), "lon": lons.ravel(), "freq": domain.ravel()}
)
rng = [
round(x, 2)
for x in np.linspace(
max([0.01, np.min(domain) - 0.5]), np.max(domain) + 0.1, 10
)
]
cmap = get_cmap(ctx["cmap"])
cmap.set_under("white")
cmap.set_over("black")
res = mp.pcolormesh(
lons,
lats,
domain,
rng,
cmap=cmap,
clip_on=False,
units="days per year",
)
# Cut down on SVG et al size
res.set_rasterized(True)
if ctx["drawc"] == "yes":
mp.drawcounties()
return mp.fig, df2
if __name__ == "__main__":
plotter(dict(level="CATEGORICAL.HIGH"))
| mit |
xuewei4d/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 15 | 5402 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break down when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
# This is to avoid division by zero while doing np.log10
EPSILON = 1e-4
X, y = datasets.load_diabetes(return_X_y=True)
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
# #############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
criterion_ = model.criterion_
plt.semilogx(model.alphas_ + EPSILON, criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(model.alpha_ + EPSILON, color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel(r'$\alpha$')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
# #############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
plt.figure()
ymin, ymax = 2300, 3800
plt.semilogx(model.alphas_ + EPSILON, model.mse_path_, ':')
plt.plot(model.alphas_ + EPSILON, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(model.alpha_ + EPSILON, linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel(r'$\alpha$')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
# #############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
plt.figure()
plt.semilogx(model.cv_alphas_ + EPSILON, model.mse_path_, ':')
plt.semilogx(model.cv_alphas_ + EPSILON, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(model.alpha_, linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel(r'$\alpha$')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/discrete/tests/test_sandwich_cov.py | 24 | 18710 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 09 21:29:20 2013
Author: Josef Perktold
"""
import os
import numpy as np
import pandas as pd
import statsmodels.discrete.discrete_model as smd
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.genmod.families import links
from statsmodels.regression.linear_model import OLS
import statsmodels.stats.sandwich_covariance as sc
from statsmodels.base.covtype import get_robustcov_results
from statsmodels.tools.tools import add_constant
from numpy.testing import assert_allclose, assert_equal
import statsmodels.tools._testing as smt
# get data and results as module global for now, TODO: move to class
from .results import results_count_robust_cluster as results_st
cur_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(cur_dir, "results", "ships.csv")
data_raw = pd.read_csv(filepath, index_col=False)
data = data_raw.dropna()
#mod = smd.Poisson.from_formula('accident ~ yr_con + op_75_79', data=dat)
# Don't use formula for tests against Stata because intercept needs to be last
endog = data['accident']
exog_data = data['yr_con op_75_79'.split()]
exog = add_constant(exog_data, prepend=False)
group = np.asarray(data['ship'], int)
exposure = np.asarray(data['service'])
# TODO get the test methods from regression/tests
class CheckCountRobustMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
if len(res1.params) == (len(res2.params) - 1):
# Stata includes lnalpha in table for NegativeBinomial
mask = np.ones(len(res2.params), np.bool_)
mask[-2] = False
res2_params = res2.params[mask]
res2_bse = res2.bse[mask]
else:
res2_params = res2.params
res2_bse = res2.bse
assert_allclose(res1._results.params, res2_params, 1e-4)
assert_allclose(self.bse_rob / self.corr_fact, res2_bse, 6e-5)
@classmethod
def get_robust_clu(cls):
res1 = cls.res1
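        # cluster-robust covariance of the parameter estimates, clustered on ship type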
cov_clu = sc.cov_cluster(res1, group)
cls.bse_rob = sc.se_cov(cov_clu)
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
def test_oth(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1._results.llf, res2.ll, 1e-4)
assert_allclose(res1._results.llnull, res2.ll_0, 1e-4)
def test_ttest(self):
smt.check_ttest_tvalues(self.res1)
def test_waldtest(self):
smt.check_ftest_pvalues(self.res1)
class TestPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
debug = False
if debug:
# for debugging
cls.bse_nonrobust = cls.res1.bse.copy()
cls.res1 = res1 = mod.fit(disp=False)
cls.get_robust_clu()
cls.res3 = cls.res1
cls.bse_rob3 = cls.bse_rob.copy()
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
# TODO: refactor xxxFit to full testing results
class TestPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
# scaling of cov_params_default to match Stata
# TODO should the default be changed?
nobs, k_params = mod.exog.shape
sc_fact = (nobs-1.) / float(nobs - k_params)
cls.res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
scaling_factor=1. / sc_fact,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
# backwards compatibility with inherited test methods
cls.corr_fact = 1
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.tvalues, res2.tvalues, rtol=rtol, atol=1e-8)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
class TestPoissonCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluExposureGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse #sc.se_cov(cov_clu)
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
cls.get_robust_clu()
class TestGLMPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit()
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
# TODO: refactor xxxFit to full testing results
class TestGLMPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit(cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit(cov_type='HC1')
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
class TestNegbinClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestNegbinCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
# mod_nbe = smd.NegativeBinomial(endog, exog, exposure=data['service'])
# res_nbe = mod_nbe.fit()
# mod_nb = smd.NegativeBinomial(endog, exog)
# res_nb = mod_nb.fit()
#
# cov_clu_nb = sc.cov_cluster(res_nb, group)
# k_params = k_vars + 1
# print sc.se_cov(cov_clu_nb / ((nobs-1.) / float(nobs - k_params)))
#
# wt = res_nb.wald_test(np.eye(len(res_nb.params))[1:3], cov_p=cov_clu_nb/((nobs-1.) / float(nobs - k_params)))
# print wt
#
# print dir(results_st)
class TestNegbinCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestNegbinCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestNegbinCluExposureFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class CheckDiscreteGLM(object):
# compare GLM with other models, no verified reference results
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_equal(res1.cov_type, self.cov_type)
assert_equal(res2.cov_type, self.cov_type)
assert_allclose(res1.params, res2.params, rtol=1e-13)
# bug TODO res1.scale missing ? in Gaussian/OLS
assert_allclose(res1.bse, res2.bse, rtol=1e-13)
# if not self.cov_type == 'nonrobust':
# assert_allclose(res1.bse * res1.scale, res2.bse, rtol=1e-13)
# else:
# assert_allclose(res1.bse, res2.bse, rtol=1e-13)
class TestGLMLogit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Logit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class T_estGLMProbit(CheckDiscreteGLM):
# invalid link. What's Probit as GLM?
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Gaussian(link=links.CDFLink))
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Probit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussNonRobust(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'nonrobust'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit()
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit()
class TestGLMGaussClu(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'cluster'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussHC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HC0'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HC0')
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HC0')
if __name__ == '__main__':
tt = TestPoissonClu()
tt.setup_class()
tt.test_basic()
tt = TestNegbinClu()
tt.setup_class()
tt.test_basic()
| bsd-3-clause |
lightning-viz/lightning-python | lightning/types/utils.py | 1 | 8582 | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
        co = [asarray(p)[:, ::-1].tolist() for p in co]
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
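    # test every pixel centre of the dims-sized grid for membership in the
    # polygon, then reshape the boolean result back to the image dimensions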
grid = meshgrid(range(dims[1]), range(dims[0]))
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
        points = [[p[0], p[1], z] for p in points]
return points | mit |
schoolie/bokeh | sphinx/source/docs/user_guide/examples/plotting_band.py | 13 | 1154 | from bokeh.plotting import figure, show, output_file
from bokeh.models import Band, ColumnDataSource
import pandas as pd
import numpy as np
output_file("band.html", title="band.py example")
# Create some random data
x = np.random.random(2500) * 140 - 20
y = np.random.normal(size=2500) * 2 + 5
df = pd.DataFrame(data=dict(x=x, y=y)).sort_values(by="x")
sem = lambda x: x.std() / np.sqrt(x.size)
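# rolling statistics over a 100-sample window: mean, standard deviation and
# standard error of the mean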
df2 = df.y.rolling(window=100).agg({"y_mean": np.mean, "y_std": np.std, "y_sem": sem})
df2 = df2.fillna(method='bfill')
df = pd.concat([df, df2], axis=1)
df['lower'] = df.y_mean - df.y_std
df['upper'] = df.y_mean + df.y_std
source = ColumnDataSource(df.reset_index())
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(tools=TOOLS)
p.scatter(x='x', y='y', line_color=None, fill_alpha=0.3, size=5, source=source)
band = Band(base='x', lower='lower', upper='upper', source=source, level='underlay',
fill_alpha=1.0, line_width=1, line_color='black')
p.add_layout(band)
p.title.text = "Rolling Standard Deviation"
p.xgrid[0].grid_line_color=None
p.ygrid[0].grid_line_alpha=0.5
p.xaxis.axis_label = 'X'
p.yaxis.axis_label = 'Y'
show(p)
| bsd-3-clause |
Slayr/Data-Science-45min-Intros | support-vector-machines-101/kernel-examples.py | 26 | 2054 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import json
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
import numpy as np
from sklearn.svm import SVC
# adapted from:
# http://scikit-learn.org/stable/auto_examples/svm/plot_svm_kernels.html
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf', 'sigmoid'):
#clf = SVC(kernel=kernel)
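    # gamma affects the rbf, poly and sigmoid kernels; it is ignored by the
    # linear kernel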
clf = SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(8, 6))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
facecolors='none', zorder=10, s=300)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired, s=100)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
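    # evaluate the decision function on a 200x200 grid; the imaginary step 200j
    # tells np.mgrid to return 200 evenly spaced points per axis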
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
#plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.pcolormesh(XX, YY, Z > 0, alpha=0.1)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.title('{}'.format(kernel))
#plt.xticks(())
#plt.yticks(())
fignum = fignum + 1
plt.show()
| unlicense |
AlexanderFabisch/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 35 | 4726 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores, used to check that the best features get the highest selection scores
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
michigraber/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
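# sweep C over a log-spaced grid and record the mean and standard deviation of
# the cross-validated scores for each value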
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
chrisburr/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
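# tile the palette so there are enough colors even when an algorithm finds many
# clusters (e.g. affinity propagation)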
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 78 | 17611 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis, last_mean,
last_var, last_n)
assert_equal(X_means_incr.dtype, output_dtype)
assert_equal(X_vars_incr.dtype, output_dtype)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
inplace_csr_row_normalize(X_csr)
assert_equal(X_csr.dtype, dtype)
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
| bsd-3-clause |
intel-analytics/analytics-zoo | pyzoo/test/zoo/chronos/data/utils/test_impute.py | 1 | 3370 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import pandas as pd
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.data.utils.impute import impute_timeseries_dataframe, \
_last_impute_timeseries_dataframe, _const_impute_timeseries_dataframe, \
_linear_impute_timeseries_dataframe
def get_ugly_ts_df():
data = np.random.random_sample((50, 5))
mask = np.random.random_sample((50, 5))
mask[mask >= 0.4] = 2
mask[mask < 0.4] = 1
mask[mask < 0.2] = 0
data[mask == 0] = None
data[mask == 1] = np.nan
df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
df['a'][0] = np.nan # make sure column 'a' has a N/A
df["datetime"] = pd.date_range('1/1/2019', periods=50)
return df
class TestImputeTimeSeries(ZooTestCase):
def setup_method(self, method):
self.df = get_ugly_ts_df()
def teardown_method(self, method):
pass
def test_impute_timeseries_dataframe(self):
with pytest.raises(AssertionError):
impute_timeseries_dataframe(self.df, dt_col="z")
with pytest.raises(AssertionError):
impute_timeseries_dataframe(
self.df, dt_col="datetime", mode="dummy")
with pytest.raises(AssertionError):
impute_timeseries_dataframe(self.df, dt_col="a")
last_res_df = impute_timeseries_dataframe(
self.df, dt_col="datetime", mode="last")
assert self.df.isna().sum().sum() != 0
assert last_res_df.isna().sum().sum() == 0
const_res_df = impute_timeseries_dataframe(
self.df, dt_col="datetime", mode="const")
assert self.df.isna().sum().sum() != 0
assert const_res_df.isna().sum().sum() == 0
linear_res_df = impute_timeseries_dataframe(
self.df, dt_col="datetime", mode="linear")
assert self.df.isna().sum().sum() != 0
assert linear_res_df.isna().sum().sum() == 0
def test_last_impute_timeseries_dataframe(self):
data = {'data': [np.nan, np.nan, 1, np.nan, 2, 3]}
df = pd.DataFrame(data)
res_df = _last_impute_timeseries_dataframe(df)
assert res_df['data'][0] == 0
assert res_df['data'][1] == 0
assert res_df['data'][3] == 1
def test_const_impute_timeseries_dataframe(self):
data = {'data': [np.nan, 1, np.nan, 2, 3]}
df = pd.DataFrame(data)
res_df = _const_impute_timeseries_dataframe(df, 1)
assert res_df['data'][0] == 1
assert res_df['data'][2] == 1
def test_linear_timeseries_dataframe(self):
data = {'data': [np.nan, 1, np.nan, 2, 3]}
df = pd.DataFrame(data)
res_df = _linear_impute_timeseries_dataframe(df)
assert res_df['data'][0] == 1
assert res_df['data'][2] == 1.5
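# Worked check of the expectations above (illustrative note, not part of the
# original test): linear interpolation between the neighbouring values 1 and 2
# gives (1 + 2) / 2 = 1.5 at index 2, and the leading NaN at index 0 is filled
# with the first valid value, 1.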
| apache-2.0 |
Yangqing/caffe2 | caffe2/python/visualize.py | 4 | 6987 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package visualize
# Module caffe2.python.visualize
"""Functions that could be used to visualize Tensors.
This is adapted from the old-time iceberk package that Yangqing wrote... Oh gold
memories. Before decaf and caffe. Why iceberk? Because I was at Berkeley,
bears are vegetarian, and iceberg lettuce has layers of leaves.
(This joke is so lame.)
"""
import numpy as np
from matplotlib import cm, pyplot
def ChannelFirst(arr):
"""Convert a HWC array to CHW."""
ndim = arr.ndim
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
def ChannelLast(arr):
"""Convert a CHW array to HWC."""
ndim = arr.ndim
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
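# Illustrative sketch (not part of the original module): the two helpers above
# are inverses of each other on the trailing three axes. The shape below is a
# hypothetical HWC image.
#
#     >>> arr = np.zeros((32, 48, 3))
#     >>> ChannelFirst(arr).shape
#     (3, 32, 48)
#     >>> ChannelLast(ChannelFirst(arr)).shape
#     (32, 48, 3)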
class PatchVisualizer(object):
"""PatchVisualizer visualizes patches.
"""
def __init__(self, gap=1):
self.gap = gap
def ShowSingle(self, patch, cmap=None):
"""Visualizes one single patch.
The input patch could be a vector (in which case we try to infer the shape
of the patch), a 2-D matrix, or a 3-D matrix whose 3rd dimension has 3
channels.
"""
if len(patch.shape) == 1:
patch = patch.reshape(self.get_patch_shape(patch))
elif len(patch.shape) > 2 and patch.shape[2] != 3:
raise ValueError("The input patch shape isn't correct.")
# determine color
if len(patch.shape) == 2 and cmap is None:
cmap = cm.gray
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
In the passed-in patches matrix, each row is a patch, in the shape of either
n*n, n*n*1 or n*n*3, given either in a flattened format (so patches would be a
2-D array) or as a multi-dimensional tensor. We will try our best to figure
out the patch size automatically.
"""
num_patches = patches.shape[0]
if ncols is None:
ncols = int(np.ceil(np.sqrt(num_patches)))
nrows = int(np.ceil(num_patches / float(ncols)))
if len(patches.shape) == 2:
patches = patches.reshape(
(patches.shape[0], ) + self.get_patch_shape(patches[0])
)
patch_size_expand = np.array(patches.shape[1:3]) + self.gap
image_size = patch_size_expand * np.array([nrows, ncols]) - self.gap
if len(patches.shape) == 4:
if patches.shape[3] == 1:
# gray patches
patches = patches.reshape(patches.shape[:-1])
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
elif patches.shape[3] == 3:
# color patches
image_shape = tuple(image_size) + (3, )
else:
raise ValueError("The input patch shape isn't expected.")
else:
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
image = np.ones(image_shape) * bg_func(patches)
for pid in range(num_patches):
row = pid // ncols * patch_size_expand[0]
col = pid % ncols * patch_size_expand[1]
image[row:row+patches.shape[1], col:col+patches.shape[2]] = \
patches[pid]
pyplot.imshow(image, cmap=cmap, interpolation='nearest')
pyplot.axis('off')
return image
def ShowImages(self, patches, *args, **kwargs):
"""Similar to ShowMultiple, but always normalize the values between 0 and 1
for better visualization of image-type data.
"""
patches = patches - np.min(patches)
patches /= np.max(patches) + np.finfo(np.float64).eps
return self.ShowMultiple(patches, *args, **kwargs)
def ShowChannels(self, patch, cmap=None, bg_func=np.mean):
""" This function shows the channels of a patch.
The incoming patch should have shape [w, h, num_channels], and each channel
will be visualized as a separate gray patch.
"""
if len(patch.shape) != 3:
raise ValueError("The input patch shape isn't correct.")
patch_reordered = np.swapaxes(patch.T, 1, 2)
return self.ShowMultiple(patch_reordered, cmap=cmap, bg_func=bg_func)
def get_patch_shape(self, patch):
"""Gets the shape of a single patch.
Basically it tries to interpret the patch as a square, and also checks if it
is in color (3 channels).
"""
edgeLen = np.sqrt(patch.size)
if edgeLen != np.floor(edgeLen):
# we are given color patches
edgeLen = np.sqrt(patch.size / 3.)
if edgeLen != np.floor(edgeLen):
raise ValueError("I can't figure out the patch shape.")
return (edgeLen, edgeLen, 3)
else:
edgeLen = int(edgeLen)
return (edgeLen, edgeLen)
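# Worked example of the inference above (illustrative numbers only): a
# flattened color patch of size 192 gives sqrt(192) ~= 13.86, not an integer,
# so we fall back to sqrt(192 / 3) = 8 and return (8, 8, 3); a flattened gray
# patch of size 64 gives sqrt(64) = 8 and returns (8, 8).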
_default_visualizer = PatchVisualizer()
"""Utility functions that directly point to functions in the default visualizer.
These functions don't return anything, so you won't see annoying printouts of
the visualized images. If you want to save the images for example, you should
explicitly instantiate a patch visualizer, and call those functions.
"""
class NHWC(object):
@staticmethod
def ShowSingle(*args, **kwargs):
_default_visualizer.ShowSingle(*args, **kwargs)
@staticmethod
def ShowMultiple(*args, **kwargs):
_default_visualizer.ShowMultiple(*args, **kwargs)
@staticmethod
def ShowImages(*args, **kwargs):
_default_visualizer.ShowImages(*args, **kwargs)
@staticmethod
def ShowChannels(*args, **kwargs):
_default_visualizer.ShowChannels(*args, **kwargs)
class NCHW(object):
@staticmethod
def ShowSingle(patch, *args, **kwargs):
_default_visualizer.ShowSingle(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowMultiple(patch, *args, **kwargs):
_default_visualizer.ShowMultiple(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowImages(patch, *args, **kwargs):
_default_visualizer.ShowImages(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowChannels(patch, *args, **kwargs):
_default_visualizer.ShowChannels(ChannelLast(patch), *args, **kwargs)
| apache-2.0 |
hmn21/stockMarketPrediction | machineLearning.py | 1 | 1559 | from sklearn.ensemble import RandomForestClassifier
from sklearn import neighbors
from sklearn.svm import LinearSVC
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def Classify(X_train, y_train, X_test, y_test, method):
"""
Performs classification on daily returns.
"""
if method == 'RF':
return RF(X_train, y_train, X_test, y_test)
elif method == 'KNN':
return KNN(X_train, y_train, X_test, y_test)
elif method == 'SVM':
return SVMClass(X_train, y_train, X_test, y_test)
elif method == 'LDA':
return LinearDA(X_train, y_train, X_test, y_test)
elif method == 'QDA':
return QuadDA(X_train, y_train, X_test, y_test)
def RF(X_train, y_train, X_test, y_test):
clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def KNN(X_train, y_train, X_test, y_test):
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def SVMClass(X_train, y_train, X_test, y_test):
clf = LinearSVC()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def QuadDA(X_train, y_train, X_test, y_test):
clf = QDA()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def LinearDA(X_train, y_train, X_test, y_test):
clf = LDA()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
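# Minimal usage sketch (not part of the original module); the arrays below are
# random placeholders standing in for lagged daily-return features and up/down
# labels, and the 150/50 split is arbitrary:
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X, y = rng.randn(200, 6), (rng.rand(200) > 0.5).astype(int)
#     acc = Classify(X[:150], y[:150], X[150:], y[150:], method='KNN')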
| mit |
kdebrab/pandas | pandas/core/indexes/numeric.py | 4 | 14410 | import numpy as np
from pandas._libs import (index as libindex,
join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
needs_i8_conversion,
is_integer_dtype,
is_bool,
is_bool_dtype,
is_scalar)
from pandas import compat
from pandas.core import algorithms
import pandas.core.common as com
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False):
if fastpath:
return cls._simple_new(data, name=name)
# is_scalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, 'name'):
name = data.name
return cls._simple_new(subarr, name=name)
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None and not self._can_hold_na:
# Ensure we are not returning an Int64Index with float data:
return self._shallow_copy_with_infer(values=values, **kwargs)
return (super(NumericIndex, self)._shallow_copy(values=values,
**kwargs))
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
return value
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(('tolerance argument for %s must contain '
'numeric elements if it is list type') %
(type(self).__name__,))
else:
raise ValueError(('tolerance argument for %s must be numeric '
'if it is a scalar: %r') %
(type(self).__name__, tolerance))
return tolerance
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
def _concat_same_dtype(self, indexes, name):
return _concat._concat_index_same_dtype(indexes).rename(name)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
None
Methods
-------
None
Notes
-----
An Index instance can **only** contain hashable objects.
See also
--------
Index : The base pandas Index type
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra=''
)
class Int64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
_left_indexer_unique = libjoin.left_join_indexer_unique_int64
_left_indexer = libjoin.left_join_indexer_int64
_inner_indexer = libjoin.inner_join_indexer_int64
_outer_indexer = libjoin.outer_join_indexer_int64
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.int64
@property
def inferred_type(self):
"""Always 'integer' for ``Int64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
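# Illustrative sketch (not part of the original module) of the safe-casting
# rule documented in _assert_safe_casting: float data is accepted only when it
# round-trips exactly to int64. The literal values are hypothetical.
#
#     >>> Int64Index(np.array([1.0, 2.0, 3.0]))
#     Int64Index([1, 2, 3], dtype='int64')
#     >>> Int64Index(np.array([1.5, 2.0]))
#     Traceback (most recent call last):
#         ...
#     TypeError: Unsafe NumPy casting, you must explicitly cast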
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
_left_indexer_unique = libjoin.left_join_indexer_unique_uint64
_left_indexer = libjoin.left_join_indexer_uint64
_inner_indexer = libjoin.inner_join_indexer_uint64
_outer_indexer = libjoin.outer_join_indexer_uint64
_can_hold_na = False
_engine_type = libindex.UInt64Engine
_default_dtype = np.uint64
@property
def inferred_type(self):
"""Always 'integer' for ``UInt64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('u8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(UInt64Index, self)
._convert_scalar_indexer(key, kind=kind))
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = com._asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return com._asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args
_typ = 'float64index'
_engine_type = libindex.Float64Engine
_left_indexer_unique = libjoin.left_join_indexer_unique_float64
_left_indexer = libjoin.left_join_indexer_float64
_inner_indexer = libjoin.inner_join_indexer_float64
_outer_indexer = libjoin.outer_join_indexer_float64
_default_dtype = np.float64
@property
def inferred_type(self):
"""Always 'floating' for ``Float64Index``"""
return 'floating'
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if needs_i8_conversion(dtype):
msg = ('Cannot convert Float64Index to dtype {dtype}; integer '
'values are required for conversion').format(dtype=dtype)
raise TypeError(msg)
elif is_integer_dtype(dtype) and self.hasnans:
# GH 13149
raise ValueError('Cannot convert NA to integer')
return super(Float64Index, self).astype(dtype, copy=copy)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com._values_from_object(key)
loc = self.get_loc(k)
new_values = com._values_from_object(series)[loc]
return new_values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._ndarray_values, other._ndarray_values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
pass
except TypeError:
pass
return False
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
if not len(nan_idxs):
raise KeyError(key)
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
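# Illustrative sketch (not part of the original module) of the NaN handling
# implemented by Float64Index.__contains__ and get_loc above; the values are
# hypothetical.
#
#     >>> idx = Float64Index([0.5, np.nan, 2.5])
#     >>> np.nan in idx
#     True
#     >>> idx.get_loc(np.nan)
#     1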
| bsd-3-clause |
guschmue/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
cdcapano/pycbc | test/lalsim.py | 4 | 16644 | # Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are simple unit tests for lalsimulation
"""
import sys
import unittest
import copy
import numpy
import lal, lalsimulation
import pycbc
from pycbc.filter import match, overlap, sigma, make_frequency_series
from pycbc.waveform import td_approximants, fd_approximants, \
get_td_waveform, get_fd_waveform, TimeSeries
import optparse
from utils import simple_exit, _check_scheme_cpu
parser = optparse.OptionParser()
parser.add_option('--scheme','-s', action='callback', type = 'choice',
choices = ('cpu','cuda'),
default = 'cpu', dest = 'scheme', callback = _check_scheme_cpu,
help = optparse.SUPPRESS_HELP)
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = optparse.SUPPRESS_HELP)
parser.add_option('--show-plots', action='store_true',
help = 'show the plots generated in this test suite')
parser.add_option('--save-plots', action='store_true',
help = 'save the plots generated in this test suite')
parser.add_option('--approximant', type = 'choice', choices = td_approximants() + fd_approximants(),
help = "Choices are %s" % str(td_approximants() + fd_approximants()))
parser.add_option('--mass1', type = float, default=10, help = "[default: %default]")
parser.add_option('--mass2', type = float, default=9, help = "[default: %default]")
parser.add_option('--spin1x', type = float, default=0, help = "[default: %default]")
parser.add_option('--spin1y', type = float, default=0, help = "[default: %default]")
parser.add_option('--spin1z', type = float, default=0, help = "[default: %default]")
parser.add_option('--spin2x', type = float, default=0, help = "[default: %default]")
parser.add_option('--spin2y', type = float, default=0, help = "[default: %default]")
parser.add_option('--spin2z', type = float, default=0, help = "[default: %default]")
parser.add_option('--lambda1', type = float, default=0, help = "[default: %default]")
parser.add_option('--lambda2', type = float, default=0, help = "[default: %default]")
parser.add_option('--coa-phase', type = float, default=0, help = "[default: %default]")
parser.add_option('--inclination', type = float, default=0, help = "[default: %default]")
parser.add_option('--delta-t', type = float, default=1.0/8192, help = "[default: %default]")
parser.add_option('--delta-f', type = float, default=1.0/256, help = "[default: %default]")
parser.add_option('--f-lower', type = float, default=30, help = "[default: %default]")
parser.add_option('--phase-order', type = int, default=-1, help = "[default: %default]")
parser.add_option('--amplitude-order', type = int, default=-1, help = "[default: %default]")
parser.add_option('--spin-order', type = int, default=-1, help = "[default: %default]")
parser.add_option('--tidal-order', type = int, default=-1, help = "[default: %default]")
(opt, args) = parser.parse_args()
print(72*'=')
print("Running {0} unit tests for {1}:".format('CPU', "Lalsimulation Waveforms"))
import matplotlib
if not opt.show_plots:
matplotlib.use('Agg')
import pylab
def get_waveform(p, **kwds):
""" Given the input parameters get me the waveform, whether it is TD or
FD
"""
params = copy.copy(p.__dict__)
params.update(kwds)
if params['approximant'] in td_approximants():
return get_td_waveform(**params)
else:
return get_fd_waveform(**params)
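# Minimal usage sketch (not part of the original test) of the helper above: any
# keyword passed here overrides the corresponding command-line default in `opt`
# before dispatching to get_td_waveform or get_fd_waveform. The overrides shown
# are hypothetical.
#
#     hp, hc = get_waveform(opt, mass1=1.4, mass2=1.35, approximant="TaylorF2")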
class TestLALSimulation(unittest.TestCase):
def setUp(self,*args):
self.save_plots = opt.save_plots
self.show_plots = opt.show_plots
self.plot_dir = "."
class params(object):
pass
self.p = params()
# Overide my parameters with the program input arguments
self.p.__dict__.update(vars(opt))
if 'approximant' in self.kwds:
self.p.approximant = self.kwds['approximant']
from pycbc import version
self.version_txt = "pycbc: %s %s\n" % (version.git_hash, version.date) + \
"lalsimulation: %s %s" % (lalsimulation.SimulationVCSIdentInfo.vcsId, lalsimulation.SimulationVCSIdentInfo.vcsDate)
def test_varying_orbital_phase(self):
#"""Check that the waveform is consistent under phase changes
#"""
if self.p.approximant in td_approximants():
sample_attr = 'sample_times'
else:
sample_attr = 'sample_frequencies'
f = pylab.figure()
pylab.axes([.1, .2, 0.8, 0.70])
hp_ref, hc_ref = get_waveform(self.p, coa_phase=0)
pylab.plot(getattr(hp_ref, sample_attr), hp_ref.real(), label="phiref")
hp, hc = get_waveform(self.p, coa_phase=lal.PI/4)
m, i = match(hp_ref, hp)
self.assertAlmostEqual(1, m, places=2)
o = overlap(hp_ref, hp)
pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi/4$")
hp, hc = get_waveform(self.p, coa_phase=lal.PI/2)
m, i = match(hp_ref, hp)
o = overlap(hp_ref, hp)
self.assertAlmostEqual(1, m, places=7)
self.assertAlmostEqual(-1, o, places=7)
pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi/2$")
hp, hc = get_waveform(self.p, coa_phase=lal.PI)
m, i = match(hp_ref, hp)
o = overlap(hp_ref, hp)
self.assertAlmostEqual(1, m, places=7)
self.assertAlmostEqual(1, o, places=7)
pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi$")
pylab.xlim(min(getattr(hp, sample_attr)), max(getattr(hp, sample_attr)))
pylab.title("Vary %s oribital phiref, h+" % self.p.approximant)
if self.p.approximant in td_approximants():
pylab.xlabel("Time to coalescence (s)")
else:
pylab.xlabel("GW Frequency (Hz)")
pylab.ylabel("GW Strain (real part)")
pylab.legend(loc="upper left")
info = self.version_txt
pylab.figtext(0.05, 0.05, info)
if self.save_plots:
pname = self.plot_dir + "/%s-vary-phase.png" % self.p.approximant
pylab.savefig(pname)
if self.show_plots:
pylab.show()
else:
pylab.close(f)
def test_distance_scaling(self):
#""" Check that the waveform is consistent under distance changes
#"""
distance = 1e6
tolerance = 1e-5
fac = 10
hpc, hcc = get_waveform(self.p, distance=distance)
hpm, hcm = get_waveform(self.p, distance=distance*fac)
hpf, hcf = get_waveform(self.p, distance=distance*fac*fac)
hpn, hcn = get_waveform(self.p, distance=distance/fac)
f = pylab.figure()
pylab.axes([.1, .2, 0.8, 0.70])
htilde = make_frequency_series(hpc)
pylab.loglog(htilde.sample_frequencies, abs(htilde), label="D")
htilde = make_frequency_series(hpm)
pylab.loglog(htilde.sample_frequencies, abs(htilde), label="D * %s" %fac)
htilde = make_frequency_series(hpf)
pylab.loglog(htilde.sample_frequencies, abs(htilde), label="D * %s" %(fac*fac))
htilde = make_frequency_series(hpn)
pylab.loglog(htilde.sample_frequencies, abs(htilde), label="D / %s" %fac)
pylab.title("Vary %s distance, $\\tilde{h}$+" % self.p.approximant)
pylab.xlabel("GW Frequency (Hz)")
pylab.ylabel("GW Strain")
pylab.legend()
pylab.xlim(xmin=self.p.f_lower)
info = self.version_txt
pylab.figtext(0.05, .05, info)
if self.save_plots:
pname = self.plot_dir + "/%s-distance-scaling.png" % self.p.approximant
pylab.savefig(pname)
if self.show_plots:
pylab.show()
else:
pylab.close(f)
self.assertTrue(hpc.almost_equal_elem(hpm * fac, tolerance, relative=True))
self.assertTrue(hpc.almost_equal_elem(hpf * fac * fac, tolerance, relative=True))
self.assertTrue(hpc.almost_equal_elem(hpn / fac, tolerance, relative=True))
def test_nearby_waveform_agreement(self):
#""" Check that the overlaps are consistent for nearby waveforms
#"""
def nearby(params):
tol = 1e-7
from numpy.random import uniform
nearby_params = copy.copy(params)
nearby_params.mass1 *= uniform(low=1-tol, high=1+tol)
nearby_params.mass2 *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1x *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1y *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1z *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2x *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2y *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2z *= uniform(low=1-tol, high=1+tol)
nearby_params.inclination *= uniform(low=1-tol, high=1+tol)
nearby_params.coa_phase *= uniform(low=1-tol, high=1+tol)
return nearby_params
hp, hc = get_waveform(self.p)
for i in range(10):
p_near = nearby(self.p)
hpn, hcn = get_waveform(p_near)
maxlen = max(len(hpn), len(hp))
hp.resize(maxlen)
hpn.resize(maxlen)
o = overlap(hp, hpn)
self.assertAlmostEqual(1, o, places=5)
def test_almost_equal_mass_waveform(self):
#""" Check that the overlaps are consistent for nearby waveforms
#"""
def nearby(params):
tol = 1e-7
from numpy.random import uniform
nearby_params = copy.copy(params)
nearby_params.mass2 = nearby_params.mass1 * \
uniform(low=1-tol, high=1+tol)
nearby_params.mass1 *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1x *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1y *= uniform(low=1-tol, high=1+tol)
nearby_params.spin1z *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2x *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2y *= uniform(low=1-tol, high=1+tol)
nearby_params.spin2z *= uniform(low=1-tol, high=1+tol)
nearby_params.inclination *= uniform(low=1-tol, high=1+tol)
nearby_params.coa_phase *= uniform(low=1-tol, high=1+tol)
return nearby_params
for i in range(10):
p_near = nearby(self.p)
hpn, hcn = get_waveform(p_near)
def test_varying_inclination(self):
#""" Test that the waveform is consistent for changes in inclination
#"""
sigmas = []
incs = numpy.arange(0, 21, 1.0) * lal.PI / 10.0
for inc in incs:
# WARNING: This does not properly handle the case of SpinTaylor*
# where the spin orientation is not relative to the inclination
hp, hc = get_waveform(self.p, inclination=inc)
s = sigma(hp, low_frequency_cutoff=self.p.f_lower)
sigmas.append(s)
f = pylab.figure()
pylab.axes([.1, .2, 0.8, 0.70])
pylab.plot(incs, sigmas)
pylab.title("Vary %s inclination, $\\tilde{h}$+" % self.p.approximant)
pylab.xlabel("Inclination (radians)")
pylab.ylabel("sigma (flat PSD)")
info = self.version_txt
pylab.figtext(0.05, 0.05, info)
if self.save_plots:
pname = self.plot_dir + "/%s-vary-inclination.png" % self.p.approximant
pylab.savefig(pname)
if self.show_plots:
pylab.show()
else:
pylab.close(f)
self.assertAlmostEqual(sigmas[-1], sigmas[0], places=7)
self.assertAlmostEqual(max(sigmas), sigmas[0], places=7)
self.assertTrue(sigmas[0] > sigmas[5])
def test_swapping_constituents(self):
#""" Test that waveform remains unchanged under swapping both objects
#"""
hp, hc = get_waveform(self.p)
hpswap, hcswap = get_waveform(self.p, mass1=self.p.mass2, mass2=self.p.mass1,
spin1x=self.p.spin2x, spin1y=self.p.spin2y, spin1z=self.p.spin2z,
spin2x=self.p.spin1x, spin2y=self.p.spin1y, spin2z=self.p.spin1z,
lambda1=self.p.lambda2, lambda2=self.p.lambda1)
op = overlap(hp, hpswap)
self.assertAlmostEqual(1, op, places=7)
oc = overlap(hc, hcswap)
self.assertAlmostEqual(1, oc, places=7)
def test_change_rate(self):
#""" Test that waveform remains unchanged under changing rate
#"""
hp, hc = get_waveform(self.p)
hp2dec, hc2dec = get_waveform(self.p, delta_t=self.p.delta_t*2.)
hpdec=numpy.zeros(len(hp2dec.data))
hcdec=numpy.zeros(len(hp2dec.data))
for idx in range(min(len(hp2dec.data),int(len(hp.data)/2))):
hpdec[idx]=hp.data[2*idx]
hcdec[idx]=hc.data[2*idx]
hpTS=TimeSeries(hpdec, delta_t=self.p.delta_t*2.,epoch=hp.start_time)
hcTS=TimeSeries(hcdec, delta_t=self.p.delta_t*2.,epoch=hc.start_time)
f = pylab.figure()
pylab.plot(hp.sample_times, hp.data,label="rate %s Hz" %"{:.0f}".format(1./self.p.delta_t))
pylab.plot(hp2dec.sample_times, hp2dec.data, label="rate %s Hz" %"{:.0f}".format(1./(self.p.delta_t*2.)))
pylab.title("Halving %s rate, $\\tilde{h}$+" % self.p.approximant)
pylab.xlabel("time (sec)")
pylab.ylabel("amplitude")
pylab.legend()
info = self.version_txt
pylab.figtext(0.05, 0.05, info)
if self.save_plots:
pname = self.plot_dir + "/%s-vary-rate.png" % self.p.approximant
pylab.savefig(pname)
if self.show_plots:
pylab.show()
else:
pylab.close(f)
op=overlap(hpTS,hp2dec)
self.assertAlmostEqual(1., op, places=2)
oc=overlap(hcTS,hc2dec)
self.assertAlmostEqual(1., oc, places=2)
def test_maker(class_name, name, **kwds):
class Test(class_name):
def __init__(self, *args):
self.kwds = kwds
class_name.__init__(self, *args)
Test.__name__ = "Test %s" % name
return Test
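# Illustrative sketch (not part of the original file): test_maker builds one
# TestCase subclass per approximant, e.g.
#
#     vars()["SEOBNRv4"] = test_maker(TestLALSimulation, "SEOBNRv4",
#                                     approximant="SEOBNRv4")
#
# which is what the loop over `apxs` below does; the approximant name here is
# only an example.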
suite = unittest.TestSuite()
if opt.approximant:
apxs = [opt.approximant]
else:
apxs = td_approximants() + fd_approximants()
# These waveforms fail the current sanity checks, and are not used in current
# analyses. Tracking down reasons for each of these failures is a lot of work,
# so for now I just exclude these from tests.
fail_list = ['EOBNRv2', 'HGimri', 'SEOBNRv1', 'SpinDominatedWf',
'PhenSpinTaylor', 'PhenSpinTaylorRD', 'EccentricTD',
'EccentricFD', 'Lackey_Tidal_2013_SEOBNRv2_ROM']
for apx in apxs:
# The inspiral wrapper is only single precision, so we won't bother checking
# it here. It may need different tolerances and some special care.
if apx.startswith("Inspiral-"):
continue
# The INTERP waveforms are designed only for filters
if apx.endswith('_INTERP') and not opt.approximant:
continue
if apx in fail_list and not opt.approximant:
        # These waveforms segfault and print debugging output to the screen.
        # Only test these if specifically told to do so.
continue
if apx in ['NR_hdf5']:
# We'll need an example file for this. Also it will need a special
# set of tests.
continue
vars()[apx] = test_maker(TestLALSimulation, apx, approximant=apx)
suite.addTest( unittest.TestLoader().loadTestsFromTestCase(vars()[apx]) )
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| gpl-3.0 |
jayflo/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
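# Example invocation (assumed typical usage, not part of the original script):
#   python bench_20newsgroups.py -e logistic_regression naive_bayes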
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
ARudiuk/mne-python | mne/io/edf/tests/test_edf.py | 4 | 8492 | """Data Equivalence Tests"""
from __future__ import print_function
# Authors: Teon Brooks <[email protected]>
# Martin Billinger <[email protected]>
# Alan Leggitt <[email protected]>
# Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import inspect
import warnings
from nose.tools import assert_equal, assert_true
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_raises)
from scipy import io
import numpy as np
from mne import pick_types
from mne.datasets import testing
from mne.externals.six import iterbytes
from mne.utils import _TempDir, run_tests_if_main, requires_pandas
from mne.io import read_raw_edf, Raw
from mne.io.tests.test_raw import _test_raw_reader
import mne.io.edf.edf as edfmodule
from mne.event import find_events
warnings.simplefilter('always')
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts')
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
data_path = testing.data_path(download=False)
edf_stim_resamp_path = op.join(data_path, 'EDF', 'test_edf_stim_resamp.edf')
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_bdf_data():
"""Test reading raw bdf files"""
raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path,
montage=montage_path, eog=eog, misc=misc)
assert_true('RawEDF' in repr(raw_py))
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_py, _ = raw_py[picks]
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(bdf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
    # bdf saved as single precision; resolution to seven decimal points in matlab
assert_array_almost_equal(data_py, data_eeglab, 8)
# Manually checking that float coordinates are imported
assert_true((raw_py.info['chs'][0]['loc']).any())
assert_true((raw_py.info['chs'][25]['loc']).any())
assert_true((raw_py.info['chs'][63]['loc']).any())
def test_edf_data():
"""Test edf files"""
_test_raw_reader(read_raw_edf, input_fname=edf_path, stim_channel=None)
raw_py = read_raw_edf(edf_path, preload=True)
# Test saving and loading when annotations were parsed.
tempdir = _TempDir()
raw_file = op.join(tempdir, 'test-raw.fif')
raw_py.save(raw_file, overwrite=True, buffer_size_sec=1)
Raw(raw_file, preload=True)
edf_events = find_events(raw_py, output='step', shortest_event=0,
stim_channel='STI 014')
# onset, duration, id
events = [[0.1344, 0.2560, 2],
[0.3904, 1.0000, 2],
[2.0000, 0.0000, 3],
[2.5000, 2.5000, 2]]
events = np.array(events)
events[:, :2] *= 512 # convert time to samples
events = np.array(events, dtype=int)
events[:, 1] -= 1
events[events[:, 1] <= 0, 1] = 1
events[:, 1] += events[:, 0]
onsets = events[:, [0, 2]]
offsets = events[:, [1, 2]]
events = np.zeros((2 * events.shape[0], 3), dtype=int)
events[0::2, [0, 2]] = onsets
events[1::2, [0, 1]] = offsets
assert_array_equal(edf_events, events)
@testing.requires_testing_data
def test_stim_channel():
"""Test reading raw edf files with stim channel"""
raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
preload=True)
picks = pick_types(raw_py.info, meg=False, eeg=True,
exclude=['EDF Annotations'])
data_py, _ = raw_py[picks]
print(raw_py) # to test repr
print(raw_py.info) # to test Info repr
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(edf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
assert_array_almost_equal(data_py, data_eeglab, 10)
# Test uneven sampling
raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
data_py, _ = raw_py[0]
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
raw_eeglab = raw_eeglab['data']
data_eeglab = raw_eeglab[0]
# match upsampling
upsample = len(data_eeglab) / len(raw_py)
data_py = np.repeat(data_py, repeats=upsample)
assert_array_equal(data_py, data_eeglab)
assert_raises(RuntimeError, read_raw_edf, edf_path, preload=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw = read_raw_edf(edf_stim_resamp_path, verbose=True)
assert_equal(len(w), 1)
assert_true('Events may jitter' in str(w[0].message))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw[:]
assert_equal(len(w), 0)
def test_parse_annotation():
"""Test parsing the tal channel"""
# test the parser
annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
annot = [a for a in iterbytes(annot)]
annot[1::2] = [a * 256 for a in annot[1::2]]
tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
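    # Descriptive note (added comment, not in the original test): the lines above
    # recombine consecutive byte pairs (lo, hi) into little-endian 16-bit values
    # lo + 256 * hi, which is presumably how an EDF reader would hand the
    # annotation channel to _parse_tal_channel.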
events = edfmodule._parse_tal_channel(tal_channel)
assert_equal(events, [[180.0, 0, 'Lights off'],
[180.0, 0, 'Close door'],
[180.0, 0, 'Lights off'],
[180.0, 0, 'Close door'],
[3.14, 4.2, 'nothing'],
[1800.2, 25.5, 'Apnea']])
def test_edf_annotations():
"""Test if events are detected correctly in a typical MNE workflow."""
# test an actual file
raw = read_raw_edf(edf_path, preload=True)
edf_events = find_events(raw, output='step', shortest_event=0,
stim_channel='STI 014')
# onset, duration, id
events = [[0.1344, 0.2560, 2],
[0.3904, 1.0000, 2],
[2.0000, 0.0000, 3],
[2.5000, 2.5000, 2]]
events = np.array(events)
events[:, :2] *= 512 # convert time to samples
events = np.array(events, dtype=int)
events[:, 1] -= 1
events[events[:, 1] <= 0, 1] = 1
events[:, 1] += events[:, 0]
onsets = events[:, [0, 2]]
offsets = events[:, [1, 2]]
events = np.zeros((2 * events.shape[0], 3), dtype=int)
events[0::2, [0, 2]] = onsets
events[1::2, [0, 1]] = offsets
assert_array_equal(edf_events, events)
def test_edf_stim_channel():
"""Test stim channel for edf file"""
raw = read_raw_edf(edf_stim_channel_path, preload=True,
stim_channel=-1)
true_data = np.loadtxt(edf_txt_stim_channel_path).T
    # The EDF writer pads data if the file is too small
_, ns = true_data.shape
edf_data = raw._data[:, :ns]
# assert stim channels are equal
assert_array_equal(true_data[-1], edf_data[-1])
# assert data are equal
assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
@requires_pandas
def test_to_data_frame():
"""Test edf Raw Pandas exporter"""
for path in [edf_path, bdf_path]:
raw = read_raw_edf(path, stim_channel=None, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
run_tests_if_main()
| bsd-3-clause |
mhue/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
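# Explanatory note (added comment, not in the original test): permutation_test_score
# returns the score on the true labels, the scores on permuted labels, and a p-value
# that is roughly the fraction of permutations scoring at least as well as the true
# labels; the near-zero p-values above indicate the classifier found real structure,
# while the final block with random labels correctly yields a large p-value.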
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
ctrl = event.state & gdk.CONTROL_MASK
shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
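    # Descriptive note (added comment): draw_idle() coalesces repeated draw requests
    # into a single redraw by scheduling one idle callback with gobject.idle_add;
    # _idle_draw_id guards against queueing more than one callback at a time.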
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
            # realize the widget so self.window (needed for the pixmap) exists;
            # this has the side effect of altering figure width/height
            # (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
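# A minimal embedding sketch (illustrative only; `fig` is assumed to be an
# existing matplotlib.figure.Figure instance):
#
#     win = gtk.Window()
#     win.connect("destroy", lambda w: gtk.main_quit())
#     canvas = FigureCanvasGTK(fig)   # FigureCanvasGTK is also a gtk.DrawingArea
#     win.add(canvas)
#     win.show_all()
#     gtk.main()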
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
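    # draw_rubberband() above avoids a full figure redraw while dragging: on the
    # first call it snapshots the axes area into self._imageBack, and on later
    # calls an idle handler restores that snapshot and draws the new rectangle
    # on top of it.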
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default is to popup menu at current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| agpl-3.0 |
knights-lab/NINJA-SHOGUN | shogun/tests/test_pipeline.py | 1 | 3478 | """
Copyright 2015-2017 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import unittest
import pkg_resources
from click.testing import CliRunner
import os
import tempfile
import pandas as pd
import glob
from shogun.__main__ import cli
class TestAligner(unittest.TestCase):
def setUp(self):
prefix = 'shogun-temp-dir-'
self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
def tearDown(self):
self.temp_dir.cleanup()
def test_utree_pipeline(self):
database = pkg_resources.resource_filename('shogun.tests', os.path.join('data'))
infile = pkg_resources.resource_filename('shogun.tests', os.path.join('data', 'combined_seqs.fna'))
outdir = os.path.join(self.temp_dir.name)
runner = CliRunner()
_log = runner.invoke(cli, ['--log', 'debug', 'pipeline', '--input', infile, '--database', database,
'--output', outdir, '--aligner', 'utree', '--no-function'])
outfile_ra = glob.glob(os.path.join(outdir, "*.ra.txt"))
self.assertTrue(len(outfile_ra) == 1)
outfile_ra = outfile_ra[0]
df_infile = pd.read_csv(outfile_ra, sep="\t", index_col=0)
# Assert the correct number of samples
self.assertTrue(df_infile.shape[1] == 3)
# Assert the type is float
self.assertTrue(len(df_infile.select_dtypes(include=['float']).columns) == 3)
def test_bowtie2_pipeline(self):
database = pkg_resources.resource_filename('shogun.tests', os.path.join('data'))
infile = pkg_resources.resource_filename('shogun.tests', os.path.join('data', 'combined_seqs.fna'))
outdir = os.path.join(self.temp_dir.name)
runner = CliRunner()
_log = runner.invoke(cli, ['--log', 'debug', 'pipeline', '--input', infile, '--database', database,
'--output', outdir, '--aligner', 'bowtie2', '--no-function'])
outfile_ra = glob.glob(os.path.join(outdir, "*.ra.txt"))
self.assertTrue(len(outfile_ra) == 1)
outfile_ra = outfile_ra[0]
df_infile = pd.read_csv(outfile_ra, sep="\t", index_col=0)
# Assert the correct number of samples
self.assertTrue(df_infile.shape[1] == 3)
# Assert the type is float
self.assertTrue(len(df_infile.select_dtypes(include=['float']).columns) == 3)
def test_burst_pipeline(self):
database = pkg_resources.resource_filename('shogun.tests', os.path.join('data'))
infile = pkg_resources.resource_filename('shogun.tests', os.path.join('data', 'combined_seqs.fna'))
outdir = os.path.join(self.temp_dir.name)
runner = CliRunner()
_log = runner.invoke(cli, ['--log', 'debug', 'pipeline', '--input', infile, '--database', database,
'--output', outdir, '--aligner', 'burst', '--no-function'])
outfile_ra = glob.glob(os.path.join(outdir, "*.ra.txt"))
self.assertTrue(len(outfile_ra) == 1)
outfile_ra = outfile_ra[0]
df_infile = pd.read_csv(outfile_ra, sep="\t", index_col=0)
# Assert the correct number of samples
self.assertTrue(df_infile.shape[1] == 3)
# Assert the type is float
self.assertTrue(len(df_infile.select_dtypes(include=['float']).columns) == 3)
if __name__ == '__main__':
unittest.main()
| mit |
richrr/scripts | python/plot-network-graph-oct-23-backup.py | 1 | 25813 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import brewer2mpl
import re
import os
import sys
import operator
from time import localtime, strftime
import argparse
import os.path
from subprocess import Popen, PIPE
from collections import defaultdict
import math
import matplotlib.font_manager as font_manager
import matplotlib
#python ~/scripts/python/plot-network-graph.py -i L3_invasive\ 0.310\ edge_attribute-leadeigenvect.txt -n L3_invasive\ 0.310\ node_attribute-leadeigenvect.txt -a L3_invasive.txt -c L3_invasive_0.310-leadeigenvect_L3_native_0.310-leadeigenvect_venn-sm.png-SM_common_edges.txt -b L3_invasive_0.310-leadeigenvect_L3_native_0.310-leadeigenvect_venn-sm.png-SM_common_edges_w_diff_correl.txt
###### to do: input 4 files: 1 common edge file, 1 common edge file with diff correl, 1 common edge file SM, 1 common edge file with diff correl SM
######## or just input the 1 common edge file SM, 1 common edge file with diff correl SM (since we wouldn't show the edges in the entire network)
def create_pretty_node_labels(G):
new_labels = dict()
for old_label in G.nodes():
pattern_to_find = "c__"
if 'o__' in old_label:
pattern_to_find = 'o__'
cont = old_label.split(pattern_to_find)
new_labels[old_label] = cont[-1] if len(cont) > 1 else cont[0] # UnassignedOtherOther -> cont[0]
tmp = new_labels[old_label]
#new_labels[old_label] = tmp if len(tmp)<=5 else tmp[:5]
new_labels[old_label] = tmp if len(tmp)<=5 else insert_newlines(tmp)
return new_labels
def insert_newlines(string, every=6):
return '\n'.join(string[i:i+every] for i in xrange(0, len(string), every))
def preprocess_graph(edges, node_module_dict):
edges_between_nodes_from_same_module = list()
for (u,v) in edges:
#print u, "---->", v
if u in node_module_dict and v in node_module_dict:
if node_module_dict[u] == node_module_dict[v]:
edges_between_nodes_from_same_module.append((u,v))
else:
print "One or both nodes of the edge are missing from the attribute file"
# for nodes that do not have edges when inter module edges are removed
for (u,v) in edges:
u_present = [tup for tup in edges_between_nodes_from_same_module if u in tup]
if len(u_present) == 0:
edges_between_nodes_from_same_module.append((u,u))
v_present = [tup for tup in edges_between_nodes_from_same_module if v in tup]
if len(v_present) == 0:
edges_between_nodes_from_same_module.append((v,v))
return edges_between_nodes_from_same_module
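# Example (hypothetical input): with node_module_dict = {'A': '1', 'B': '1', 'C': '2'}
# and edges = [('A', 'B'), ('A', 'C')], preprocess_graph() keeps ('A', 'B') because
# both nodes share module '1', drops the inter-module edge ('A', 'C'), and adds the
# self edge ('C', 'C') so that C still appears in the plot.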
def identify_node_categ(node_module_dict):
node_categ_dict = dict()
for node in node_module_dict:
start = 'p__'
end = 'c__'
result = node
if start in node and end in node:
result = re.search('%s(.*)%s' % (start, end), node).group(1)
elif start in node and end not in node:
result = node[node.index(start)+3:]
elif start not in node and end not in node:
if 'k__Fungi' in node:
result = node[node.index('k__Fungi')+8:] # hack for k__FungiOtherOtherOther
node_categ_dict[node] = result
#print node, result
return node_categ_dict
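# Example (hypothetical node name): for a node like
# 'k__Bacteriap__Proteobacteriac__Gammaproteobacteria', identify_node_categ() extracts
# the text between 'p__' and 'c__', i.e. 'Proteobacteria', and uses it as the node's
# phylum category.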
def create_color_map(node_categ_dict_values):
categ_color_map = dict()
available_colors_key = ['#ff69b4','#ee82ee','#a020f0','#a52a2a','#fa8072','#cd853f','#ffa500','#ff0000',\
'#ff6347','#0000ff','#1e90ff','#00bfff','#40e0d0','#00ffff','#e0ffff','#5f9ea0','#f5fffa',\
                        '#f0ffff','#f0f8ff','#e6e6fa','#fff0f5','#ffe4e1','#66cdaa','#7fffd4','#006400','#556b2f','#8fbc8f',\
'#9acd32','#228b22','#6b8e23','#bdb76b','#f0e68c','#ffff00','#d3d3d3','#adff2f','#faf0e6','#faebd7',\
'#eedfcc','#cdc0b0','#8b8378','#ffefd5','#ffebcd','#ffe4c4','#eed5b7','#cdb79e','#8b7d6b','#ffdab9',\
'#eecbad','#cdaf95','#8b7765','#ffdead','#ffe4b5','#fff8dc','#eee8dc','#cdc8b1','#8b8878','#fffff0',\
'#eee5de','#cdc5bf','#8b8682','#f0fff0','#e0eee0','#c1cdc1','#838b83','#4682b4','#b0c4de','#add8e6','#b0e0e6']
idx = 0
for phyla in set(node_categ_dict_values):
if idx <= len(available_colors_key):
pass
else:
idx = 0
categ_color_map[phyla] = available_colors_key[idx]
idx += 1
return categ_color_map
def main(args):
parser = argparse.ArgumentParser(description='Plot networks obtained from MENA')
parser.add_argument('-i', '--infile') # file containing the edges (interactions between OTUs), can be sif or edge attribute file
parser.add_argument('-n', '--nodefile') # file containing the node attribute
parser.add_argument('-c', '--commonedgefile') # file containing the edges common with the "other" network (here, used only for the Same Module network)
parser.add_argument('-b', '--commonedgediffcorrelfile') # file containing the edges common with the "other" network but different correlations/edge attributes. (here, used only for the Same Module network)
parser.add_argument('-a', '--nodeabund') # file containing the node abundance
parser.add_argument('-o', '--outfilestr') # string for output filename
parser.add_argument('-l', '--logfile', default="ITS-log-file.txt") # log filename
parser.add_argument('-q', '--imagequality', default=600, type=int) # image quality dpi
parser.add_argument('-f', '--imageformat', default='pdf') # generate images in format. allowed: pdf, png, jpg, tiff
parser.add_argument('-y', '--outdir', default='./') # dir name for outputting figs.
parser.add_argument('-e', '--edgetypeoff', action='store_true', default=False) # do not distinguish positive and negative correlation edges
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
parser.add_argument('-z', '--comparetwomethodsnets', action='store_true', default=False) # compare same module common edges (nets) of differents group from two methods. P.S. you have to run the script twice with -t and then finally once with -c
args = parser.parse_args()
if args.infile == None:
parser.print_help()
        sys.exit('\nat least two arguments (edge file, node attributes file or -z) required\n')
infile = args.infile
node_attrib_file = ''
if args.nodefile != None:
node_attrib_file = args.nodefile
outfilestring = infile.replace(' ','_')
if args.outfilestr != None:
outfilestring = args.outfilestr
outfilestring = args.outdir + outfilestring + '_pubs_'
if args.commonedgefile != None and args.commonedgediffcorrelfile != None:
outfilestring += 'withCommonEdgeInfo'
node_abund_file = args.nodeabund
#common_edge_list = []
#common_edge_diff_correl_list = []
common_edge_list = [i.replace('\n', '') for i in read_file(args.commonedgefile)] if args.commonedgefile != None else []
common_edge_diff_correl_list = [i.replace('\n', '') for i in read_file(args.commonedgediffcorrelfile)] if args.commonedgediffcorrelfile != None else []
img_qual = args.imagequality
img_frmt = args.imageformat
delim = args.delimiter
#http://www.discoveryplayground.com/computer-programming-for-kids/rgb-colors/
# purple, deep pink, red, orange, brown, wheat, yellow, forest green, cyan, blue
attrib_color_map = {'0' : '#a020f0' , '1' : '#ff1493', '2' : '#ff0000', '3' : '#ffa500' , \
'4' : '#a52a2a' , '5' : '#f5deb3', '6' : '#ffff00' , '7' : '#228b22' , '8' : '#00ffff' , '9' : '#0000ff'}
#Hot Pink,Violet,Purple,Brown,Salmon,Peru,Orange,Red,Tomato,Blue,Dodger Blue,Deep Sky Blue,Turquoise,Cyan,Light Cyan,Cadet Blue,Mint Cream,Azure,Alice Blue,Lavender,Lavender Blush,Misty Rose,Medium Aquamarine,Aquamarine,Dark Green,Dark Olive Green,Dark Sea Green,Yellow Green,Forest Green,Olive Drab,Dark Khaki,Khaki,Yellow,Light Gray,Green Yellow,Linen,Antique White,Antique White 2,Antique White 3,Antique White 4,Papaya Whip,Blanched Almond,Bisque,Bisque 2,Bisque 3,Bisque 4,Peach Puff,Peach Puff 2,Peach Puff 3,Peach Puff 4,Navajo White,Moccasin,Cornsilk,Cornsilk 2,Cornsilk 3,Cornsilk 4,Ivory,Seashell 2,Seashell 3,Seashell 4,Honeydew,Honeydew 2,Honeydew 3,Honeydew 4,Steel Blue,Light Steel Blue,Light Blue,Powder Blue
#'''
categ_color_map = {'Acidobacteria' : '#ff69b4','Actinobacteria' : '#ee82ee','Aquificae' : '#a020f0',\
'Armatimonadetes' : '#a52a2a','Bacteroidetes' : '#fa8072','Caldiserica' : '#cd853f','Chlamydiae' : '#ffa500',\
'Chlorobi' : '#ff0000','Chloroflexi' : '#ff6347','Chrysiogenetes' : '#0000ff','Cyanobacteria' : '#1e90ff',\
'Deferribacteres' : '#00bfff','Deinococcus-Thermus' : '#40e0d0','Dictyoglomi' : '#00ffff',\
'Elusimicrobia' : '#e0ffff','Fibrobacteres' : '#5f9ea0','Firmicutes' : '#f5fffa','Fusobacteria' : '#f0ffff',\
'Gemmatimonadetes' : '#f0f8ff','Lentisphaerae' : '#e6e6fa','Nitrospira' : '#fff0f5','Planctomycetes' : '#ffe4e1',\
'Proteobacteria' : '#66cdaa','Spirochaetes' : '#7fffd4','Synergistetes' : '#006400','Tenericutes' : '#556b2f',\
'Thermodesulfobacteria' : '#8fbc8f','Thermomicrobia' : '#9acd32','Thermotogae' : '#228b22','Verrucomicrobia' : '#6b8e23',\
'Crenarchaeota' : '#bdb76b','Euryarchaeota' : '#f0e68c','Korarchaeota' : '#ffff00','Nanoarchaeota' : '#d3d3d3',\
'Thaumarchaeota' : '#adff2f','[Parvarchaeota]' : '#faf0e6','[Caldithrix]' : '#faebd7','[Thermi]' : '#eedfcc','AD3' : '#cdc0b0','BHI80-139' : '#8b8378','BRC1' : '#ffefd5','FBP' : '#ffebcd','FCPU426' : '#ffe4c4','GAL15' : '#eed5b7','GN02' : '#cdb79e','GN04' : '#8b7d6b','GOUTA4' : '#ffdab9','Kazan-3B-28' : '#eecbad','MVP-21' : '#cdaf95','MVS-104' : '#8b7765','NC10' : '#ffdead','Nitrospirae' : '#ffe4b5','NKB19' : '#fff8dc','OD1' : '#eee8dc','OP11' : '#cdc8b1','OP3' : '#8b8878','SBR1093' : '#fffff0','SC4' : '#eee5de','SR1' : '#cdc5bf','TM6' : '#8b8682','TM7' : '#f0fff0','WPS-2' : '#e0eee0','WS2' : '#c1cdc1','WS3' : '#838b83','WS4' : '#4682b4','ZB3' : '#b0c4de','Other' : '#add8e6','UnassignedOtherOther' : '#b0e0e6'}
#''' #-----(1)
'''
categ_color_map = {'OtherOtherOther' : '#ff69b4','Ascomycota' : '#ee82ee','AscomycotaOtherOther' : '#a020f0',\
'Basidiomycota' : '#a52a2a','BasidiomycotaOtherOther' : '#fa8072', 'Chytridiomycota' : '#0000ff','unidentified' : '#1e90ff',\
'Zygomycota' : '#00bfff'}
'''
#categ_color_map = {} #-----(2)
edge_color_map = {'0':'cyan', '1.000':'green' , '-1.000':'red' , '99':'commEdgeDiffCorrel2methods', 'commEdge':'black' , 'commEdgeDiffCorrel': 'blue'}
if args.edgetypeoff:
draw_plots_wout_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map)
if args.comparetwomethodsnets:
draw_plots_with_edge_attributes_no_node_module_file(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map, node_abund_file, common_edge_list, common_edge_diff_correl_list)
else:
draw_plots_with_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map, node_abund_file, common_edge_list, common_edge_diff_correl_list)
def get_edge_attributes(edges, edge_color_map, common_edge_list, common_edge_diff_correl_list):
edges_attrib_dict = dict()
if len(edges[0]) < 3:
sys.exit('\nedge attributes file required, not sif\n')
else:
for (u,v,w) in edges:
edge = (u,v)
if ','.join(edge) in common_edge_diff_correl_list:
#something
edges_attrib_dict[edge] = edge_color_map['commEdgeDiffCorrel']
continue
if ','.join(edge) in common_edge_list:
#something
edges_attrib_dict[edge] = edge_color_map['commEdge']
continue
edges_attrib_dict[edge] = edge_color_map[w] # the value is the color based on the edge weight
return edges_attrib_dict
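# Edge coloring precedence in get_edge_attributes(): an edge listed in
# common_edge_diff_correl_list wins ('commEdgeDiffCorrel' -> blue), then an edge in
# common_edge_list ('commEdge' -> black); otherwise the color is looked up from the
# edge weight ('1.000' -> green, '-1.000' -> red, '0' -> cyan).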
def get_node_abundance(node_abund_file, delim):
lines = read_file(node_abund_file)
node_avgAbund_dict = dict()
for l in lines:
if '#' in l or 'Taxon' in l:
continue
l = l.strip().split(delim)
taxon = l[0]
abund = [float(i) for i in l[1:]]
avg_abund = sum(abund)/float(len(abund))
node_avgAbund_dict[taxon] = avg_abund
return node_avgAbund_dict
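# Example (hypothetical row): a taxon line 'p__Foo\t1.0\t2.0\t3.0' yields an average
# relative abundance of 2.0, keyed by the taxon name.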
def node_weights_to_sizes(x):
sorted_x = sorted(x.items(), key=operator.itemgetter(1))
node_sizes_dict = dict()
for idx, val in enumerate(sorted_x):
node = val[0]
for patt in [';', ' ', '[', ']']:
node = node.replace(patt, '')
node_sizes_dict[node] = 750 + (idx * 2) # temp hack to increase size of nodes for publication
#node_sizes_dict[node] = 150 + (idx * 2)
#print node_sizes_dict
return node_sizes_dict
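# node_weights_to_sizes() ranks nodes by average abundance and maps rank to a drawing
# size (750 + 2 * rank with the current publication hack), so relative ordering, not
# absolute abundance, determines node size; separators such as ';', '[' and ']' are
# stripped from the taxon keys first.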
def draw_plots_with_edge_attributes_no_node_module_file(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map, node_abund_file, common_edge_list, common_edge_diff_correl_list):
# edges
edges = readColumnsSep(infile, ' ', 0, 2, 4)
edges_attrib_dict = get_edge_attributes(edges, edge_color_map, common_edge_list, common_edge_diff_correl_list)
edges_list = edges_attrib_dict.keys()
# calculate avg. relative abundance of the node
node_abundance = get_node_abundance(node_abund_file, delim)
#and make a list and submit as node_size to draw()
node_sizes_dict = node_weights_to_sizes(node_abundance)
samModG = nx.Graph()
samModGcolors = []
for (u,v) in edges_list:
# for singleton nodes in module, dummy self edge was added to display on plot
color_ = ''
if (u,v) in edges_attrib_dict:
color_ = edges_attrib_dict[(u,v)]
elif u==v:
color_ = 'cyan'
samModG.add_edge(u , v, color=color_)
samModGcolors.append(color_)
all_nodes_in_edge_list = [','.join(e) for e in edges_list]
# identify the category the OTU belongs to
node_categ_dict = identify_node_categ(condense_list(all_nodes_in_edge_list , ','))
# create a color map based on phyla
#categ_color_map = create_color_map(node_categ_dict.values()) # use to dynamically create color map, uncomment (1) and (2)
for node in samModG.nodes():
samModG.add_node(node, category = node_categ_dict[node])
samModGnode_sizes_list = [node_sizes_dict[i] for i in samModG.nodes()]
# reduce length of label for easier visualization
# nodecolor as per the phyla
nodeColor = [categ_color_map[samModG.node[node]['category']] for node in samModG]
new_labels = create_pretty_node_labels(samModG)
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in categ_color_map:
if label in node_categ_dict.values():# only show legend for values that are in my data
ax.plot([],[],'o',color=categ_color_map[label],label=label)
for label in edge_color_map: # 0, 1, -1 correlation values
if edge_color_map[label] in samModGcolors:# colors, only show legend for values that are in my data
ax.plot([],[],color=edge_color_map[label],label=label)
plt.title('OTUs colored as per Phylum.')
# other layout algos: dot, neato, fdp, twopi, circo
algo = 'circo'
pos = nx.graphviz_layout(samModG, prog=algo)
#https://wiki.ubuntu.com/Fonts
#williamslab@HORT-MW-Vos:/usr/share/matplotlib/mpl-data/fonts$ sudo ln -s /usr/share/fonts/truetype/msttcorefonts/Arial.ttf ./ttf/arial.ttf
#williamslab@HORT-MW-Vos:/usr/share/matplotlib/mpl-data/fonts$ sudo ln -s /usr/share/fonts/truetype/msttcorefonts/Times_New_Roman.ttf ./ttf/times.ttf
'''
fontpath = '/usr/local/share/fonts/Arial.ttf'
prop = font_manager.FontProperties(fname=fontpath)
matplotlib.rcParams['font.family'] = prop.get_name()
'''
#/usr/share/fonts/truetype/msttcorefonts/
text_font = 'Arial' # 'Times' 'Helvetica'
'''
#http://stackoverflow.com/questions/18821795/how-can-i-get-list-of-font-familyor-name-of-font-in-matplotlib
'''
nx.draw(samModG, edgelist=edges_list, edge_color = samModGcolors, pos=pos, node_color = nodeColor, labels = new_labels, with_labels = True, node_size=samModGnode_sizes_list, font_size=8, font_family=text_font)
#http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
#plt.legend(loc=3,prop={'size':6})
plt.legend(bbox_to_anchor=(0.15, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-edge-node-color-phyla." + img_frmt, dpi = img_qual)
plt.close()
def draw_plots_with_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map, node_abund_file, common_edge_list, common_edge_diff_correl_list):
G = nx.Graph()
# edges
edges = readColumnsSep(infile, ' ', 0, 2, 4)
edges_attrib_dict = get_edge_attributes(edges, edge_color_map, common_edge_list, common_edge_diff_correl_list)
edges_list = edges_attrib_dict.keys()
Gcolors = edges_attrib_dict.values()
#G.add_edges_from(edges_list) # this will only add edges but not the color info. which I might want later
for (u,v) in edges_list:
G.add_edge(u , v, color=edges_attrib_dict[(u,v)])
# node attributes
node_module_dict = readDict(node_attrib_file, 1, 7, '\t')
for node in node_module_dict:
G.add_node(node, moduleNumb = node_module_dict[node])
# calculate avg. relative abundance of the node
node_abundance = get_node_abundance(node_abund_file, delim)
#and make a list and submit as node_size to draw()
node_sizes_dict = node_weights_to_sizes(node_abundance)
Gnode_sizes_list = [node_sizes_dict[i] for i in G.nodes()]
#print G.edges(data=True)
# preprocessed graph, edges only if both nodes are in same module
edges_between_nodes_from_same_module = preprocess_graph(edges_list, node_module_dict)
samModG = nx.Graph()
#samModG.add_edges_from(edges_between_nodes_from_same_module)
#samModG_edges_list = []
samModGcolors = []
for (u,v) in edges_between_nodes_from_same_module:
# for singleton nodes in module, dummy self edge was added to display on plot
color_ = ''
if (u,v) in edges_attrib_dict:
color_ = edges_attrib_dict[(u,v)]
elif u==v:
color_ = 'cyan'
samModG.add_edge(u , v, color=color_)
#samModG_edges_list.append()
samModGcolors.append(color_)
#print samModGcolors
# identify the category the OTU belongs to
node_categ_dict = identify_node_categ(node_module_dict)
# create a color map based on phyla
categ_color_map = create_color_map(node_categ_dict.values())
for node in samModG.nodes():
samModG.add_node(node, category = node_categ_dict[node])
samModGnode_sizes_list = [node_sizes_dict[i] for i in samModG.nodes()]
#print samModGnode_sizes_list
# reduce length of label for easier visualization
new_labels = create_pretty_node_labels(G)
# plot edges as per modules
#http://stackoverflow.com/questions/24662006/python-networkx-graph-different-colored-nodes-using-two-lists
nodeColor = [attrib_color_map[G.node[node]['moduleNumb']] for node in G if node != 'Name']
#http://stackoverflow.com/questions/22992009/legend-in-python-networkx
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in attrib_color_map:
if label in node_module_dict.values():# only show legend for values that are in my data
ax.plot([],[],'o',color=attrib_color_map[label],label=label)
for label in edge_color_map: # 0, 1, -1 correlation values
if edge_color_map[label] in Gcolors:# colors, only show legend for values that are in my data
ax.plot([],[],color=edge_color_map[label],label=label)
plt.title('OTUs colored as per modules. Intermodule edges allowed.')
#nx.draw(G, edgelist=edges_list, edge_color = Gcolors, node_color = nodeColor, with_labels = False, node_size=Gnode_sizes_list)#, style='dashed')
nx.draw(G, edgelist=edges_list, edge_color = Gcolors, node_color = nodeColor, with_labels = False)#, style='dashed')
#plt.legend()
plt.legend(bbox_to_anchor=(0.05, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-all-edge-node-color-module.png", dpi = img_qual)
plt.clf()
# nodecolor as per the phyla
nodeColor = [categ_color_map[samModG.node[node]['category']] for node in samModG]
new_labels = create_pretty_node_labels(samModG)
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in categ_color_map:
if label in node_categ_dict.values():# only show legend for values that are in my data
ax.plot([],[],'o',color=categ_color_map[label],label=label)
for label in edge_color_map: # 0, 1, -1 correlation values
if edge_color_map[label] in samModGcolors:# colors, only show legend for values that are in my data
ax.plot([],[],color=edge_color_map[label],label=label)
plt.title('OTUs colored as per Phylum. Intermodule edges NOT allowed.')
# other layout algos: dot, neato, fdp, twopi, circo
algo = 'circo'
pos = nx.graphviz_layout(samModG, prog=algo)
nx.draw(samModG, edgelist=edges_between_nodes_from_same_module, edge_color = samModGcolors, pos=pos, node_color = nodeColor, labels = new_labels, with_labels = True, node_size=samModGnode_sizes_list, font_size=6)
#nx.draw(samModG, edgelist=edges_between_nodes_from_same_module, edge_color = samModGcolors, pos=pos, node_color = nodeColor, labels = new_labels, font_size=8, with_labels = True)
#http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
#plt.legend(loc=3,prop={'size':6})
plt.legend(bbox_to_anchor=(0.15, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-same-module-edge-node-color-phyla." + img_frmt, dpi = img_qual)
plt.close()
def draw_plots_wout_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map,edge_color_map):
G = nx.Graph()
# edges
edges = readColumnsSep(infile, '\t', 0, 2)
G.add_edges_from(edges)
# node attributes
node_module_dict = readDict(node_attrib_file, 1, 7, '\t')
for node in node_module_dict:
G.add_node(node, moduleNumb = node_module_dict[node])
# preprocessed graph, edges only if both nodes are in same module
edges_between_nodes_from_same_module = preprocess_graph(edges, node_module_dict)
samModG = nx.Graph()
samModG.add_edges_from(edges_between_nodes_from_same_module)
# identify the category the OTU belongs to
node_categ_dict = identify_node_categ(node_module_dict)
for node in samModG.nodes():
samModG.add_node(node, category = node_categ_dict[node])
# reduce length of label for easier visualization
new_labels = create_pretty_node_labels(G)
# plot edges as per modules
#http://stackoverflow.com/questions/24662006/python-networkx-graph-different-colored-nodes-using-two-lists
nodeColor = [attrib_color_map[G.node[node]['moduleNumb']] for node in G if node != 'Name']
#http://stackoverflow.com/questions/22992009/legend-in-python-networkx
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in attrib_color_map:
if label in node_module_dict.values():# only show legend for values that are in my data
ax.plot([0],[0],color=attrib_color_map[label],label=label)
plt.title('OTUs colored as per modules. Intermodule edges allowed.')
nx.draw(G, node_color = nodeColor, with_labels = False)
#nx.draw_circular(G, node_color = nodeColor, labels = new_labels, with_labels = True)
#plt.legend()
plt.legend(bbox_to_anchor=(0.05, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-all-edge-node-color-module.png", dpi = img_qual)
plt.clf()
# nodecolor as per the phyla
nodeColor = [categ_color_map[samModG.node[node]['category']] for node in samModG]
new_labels = create_pretty_node_labels(samModG)
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in categ_color_map:
if label in node_categ_dict.values():# only show legend for values that are in my data
ax.plot([0],[0],color=categ_color_map[label],label=label)
plt.title('OTUs colored as per Phylum. Intermodule edges NOT allowed.')
# other layout algos: dot, neato, fdp, twopi, circo
algo = 'circo'
pos = nx.graphviz_layout(samModG, prog=algo)
nx.draw(samModG, pos=pos, node_color = nodeColor, labels = new_labels, with_labels = True)
#http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
#plt.legend(loc=3,prop={'size':6})
plt.legend(bbox_to_anchor=(0.15, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-same-module-edge-node-color-phyla." + img_frmt, dpi = img_qual)
plt.close()
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
| gpl-3.0 |
nikhilnrng/german-credit-risk | src/model.py | 1 | 4486 | import numpy
from defines import Types
from scaler import NumericScaler
from binomial import BinomialClassifier
from sklearn import feature_selection
from sklearn import decomposition
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score, accuracy_score
from sklearn.metrics import confusion_matrix
def baseline_classifier(data, labels):
print 'Applying baseline classification'
clf = BinomialClassifier(0.7)
pred = cross_val_predict(clf, data, labels, cv=10)
clf.best_score_ = accuracy_score(labels, pred)
print 'accuracy: %0.3f' % accuracy_score(labels, pred)
print '\n',
print classification_report(labels, pred)
return clf
def knn_classifier(data, labels, columns):
print 'Applying k-nearest neighbor classification'
# create param grid
n_numeric = len([c.TYPE for c in columns if c.TYPE is Types.NUMERICAL and c.CATEGORIES is None])
n_neighbors = list(range(1, 51, 1))
parameters = dict(knn__n_neighbors=n_neighbors)
# create model pipeline
ns = NumericScaler(n_numeric)
rf = RandomForestClassifier() #random_state=8)
knn = KNeighborsClassifier()
rfe = feature_selection.RFE(rf)
pipe = Pipeline(steps=[('ns', ns),
('rfe', rfe),
('knn', knn)])
# run grid search with 10-fold cross validation
clf = GridSearchCV(pipe, parameters, cv=10, verbose=1)
clf.fit(data, labels)
pred = clf.predict(data)
print 'accuracy: %0.3f' % clf.best_score_
print 'Best parameters set: '
best_parameters = clf.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
print '\n',
print classification_report(labels, pred)
return clf
def svm_classifier(data, labels, columns):
print 'Applying SVM classification with RBF kernel'
# create param grid
n_numeric = len([c.TYPE for c in columns if c.TYPE is Types.NUMERICAL and c.CATEGORIES is None])
C = [0.1, 1, 10, 100, 1000]
gamma = ['auto', 1, 0.1, 0.001, 0.0001]
parameters = dict(svm__C=C,
svm__gamma=gamma)
# create model pipeline
ns = NumericScaler(n_numeric)
rf = RandomForestClassifier() #random_state=2)
rfe = feature_selection.RFE(rf)
svm = SVC(kernel='rbf') #, random_state=17)
pipe = Pipeline(steps=[('ns', ns),
('rfe', rfe),
('svm', svm)])
# run grid search with 10-fold validation
clf = GridSearchCV(pipe, parameters, cv=10, verbose=1)
clf.fit(data, labels)
pred = clf.predict(data)
print 'accuracy: %0.3f' % clf.best_score_
print 'Best parameters set: '
best_parameters = clf.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
print '\n',
print classification_report(labels, pred)
return clf
def naive_bayes_classifier(data, labels, columns):
print 'Applying Naive Bayes classification'
# create param grid
n_numeric = len([c.TYPE for c in columns if c.TYPE is Types.NUMERICAL and c.CATEGORIES is None])
n_components = list(range(1, data.shape[1] + 1, 1))
parameters = dict(pca__n_components=n_components)
# create model pipeline
ns = NumericScaler(n_numeric, with_std=False)
rf = RandomForestClassifier() #random_state=2)
rfe = feature_selection.RFE(rf)
pca = decomposition.PCA()
gnb = GaussianNB()
pipe = Pipeline(steps=[('ns', ns),
('pca', pca),
('gnb', gnb)])
# run grid search with 10-fold validation
clf = GridSearchCV(pipe, parameters, cv=10, verbose=1)
clf.fit(data, labels)
pred = clf.predict(data)
print 'accuracy: %0.3f' % clf.best_score_
print 'Best parameters set: '
best_parameters = clf.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
print '\n',
print classification_report(labels, pred)
return clf
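# Minimal usage sketch (illustrative; load_data is a hypothetical helper that returns
# the encoded German-credit feature matrix, labels and column metadata):
#
#     data, labels, columns = load_data('german.data')
#     clf_baseline = baseline_classifier(data, labels)
#     clf_knn = knn_classifier(data, labels, columns)
#     clf_svm = svm_classifier(data, labels, columns)
#     clf_nb = naive_bayes_classifier(data, labels, columns)
#     # each returned object exposes best_score_ for comparison across models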
| mit |
jzt5132/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
mtasende/Machine-Learning-Nanodegree-Capstone | recommender/indicator.py | 1 | 2929 | from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import utils.preprocessing as pp
class Indicator(object):
"""
A class that represents a feature 'of importance' to the agent.
data_df example:
"""
def __init__(self, extractor, extractor_vec, q_levels, data_df, window=14, name='unknown'):
self.extractor = extractor
self.extractor_vec = extractor_vec
self.window = window
self.q_levels = q_levels
self.scaler = StandardScaler()
self.fit(data_df)
self.name = name
def fit(self, data_df):
extracted_data = self.extractor_vec(data_df, self.window)
self.scaler.fit(extracted_data.fillna(0).values.reshape(-1, 1))
def interval_to_value_vec(self, num_interval_vec):
q_dict = {}
for index in num_interval_vec.index:
q_dict[index] = self.interval_to_value(int(num_interval_vec.loc[index].values[0]))
return pd.DataFrame.from_dict(q_dict, orient='index', dtype=np.float64)
def interval_to_value(self, num_interval):
""" Given an interval number it calculates a 'quantized value'. """
if num_interval == 0:
return self.q_levels[0]
if num_interval == len(self.q_levels):
return self.q_levels[-1]
return (self.q_levels[num_interval] + self.q_levels[num_interval-1]) / 2
def quantize_vec(self, real_values_df):
q_dict = {}
for index in real_values_df.index:
q_dict[index] = self.quantize(real_values_df.loc[index])
return pd.DataFrame.from_dict(q_dict, orient='index', dtype=np.float64)
def quantize(self, real_value):
""" Returns the number of interval in which the real value lies. """
temp_list = self.q_levels + [real_value]
temp_list.sort()
sorted_index = temp_list.index(real_value)
return sorted_index
def get_quantized_value(self, real_value):
""" Returns a quantized value, given the real value. """
return self.interval_to_value(self.quantize(real_value))
def extract(self, data_df):
""" Returns the indicator value in the last date of data_df"""
raw_res = np.array([[self.extractor(pp.fill_missing(data_df), self.window)]])
if np.isnan(raw_res[0,0]):
print(self.name)
print(raw_res)
print(data_df.iloc[-self.window:])
scaled_res = self.scaler.transform(raw_res)
return self.quantize(scaled_res[0, 0])
    def extract_vec(self, data_df):
        """ Return a pandas DataFrame with the quantized indicator values for all the valid dates in data_df."""
temp_df = data_df.copy()
temp_df['ind'] = self.scaler.transform(
pp.fill_missing(
self.extractor_vec(pp.fill_missing(data_df), self.window)).values.reshape(-1, 1))
return self.quantize_vec(temp_df['ind'])
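# Illustrative usage sketch (not part of the original module). The rolling-mean
# extractor pair below is hypothetical; any (scalar, vectorized) extractor pair
# with the same signatures would work, and `prices_df` is an assumed input.
def _example_build_indicator(prices_df):
    def sma_last(data_df, window):
        # scalar extractor: mean of the last `window` rows
        return data_df.iloc[-window:].mean().values[0]
    def sma_vec(data_df, window):
        # vectorized extractor: rolling mean over all dates
        return data_df.rolling(window).mean()
    q_levels = [-2.0, -1.0, 0.0, 1.0, 2.0]  # assumed quantization boundaries
    ind = Indicator(sma_last, sma_vec, q_levels, prices_df, window=14, name='sma')
    return ind.extract(prices_df)  # interval index for the latest date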
| mit |
hsiaoyi0504/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
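# Illustrative usage sketch (for exposition only): log-densities of two points
# under a two-component 'diag' model; the returned array has shape
# (n_samples, n_components).
def _example_log_multivariate_normal_density():
    X = np.array([[0., 0.], [5., 5.]])
    means = np.array([[0., 0.], [5., 5.]])
    covars = np.ones((2, 2))  # (n_components, n_features) for 'diag'
    return log_multivariate_normal_density(X, means, covars, 'diag')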
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
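# Illustrative usage sketch (for exposition only): draw five points from a
# single diagonal-covariance Gaussian; the result has shape
# (n_features, n_samples) == (2, 5).
def _example_sample_gaussian():
    mean = np.array([0., 10.])
    covar = np.array([1., 2.])  # per-feature variances for 'diag'
    return sample_gaussian(mean, covar, 'diag', n_samples=5, random_state=0)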
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
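# Illustrative usage sketch (for exposition only): the bic/aic methods above
# support simple model selection, e.g. picking the n_components that minimizes
# BIC on data X.
def _example_select_n_components_by_bic(X, max_components=5):
    bics = [GMM(n_components=k, covariance_type='full').fit(X).bic(X)
            for k in range(1, max_components + 1)]
    return int(np.argmin(bics)) + 1  # number of components with lowest BIC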
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations, we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
            raise ValueError("'spherical' covars must have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
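# Illustrative usage sketch (for exposition only): expand one 'tied' covariance
# template into per-component 'diag' parameters of shape
# (n_components, n_features) == (2, 3).
def _example_distribute_covar_matrix():
    tied_cv = np.eye(3)
    return distribute_covar_matrix_to_match_covariance_type(tied_cv, 'diag', 2)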
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/linear_model/sag.py | 30 | 12959 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
        # Lipschitz constant of the gradient for squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
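# Illustrative usage sketch (for exposition only): how a caller might derive the
# automatic step size for SAGA on a design matrix X with regularization
# strength alpha; X and alpha are assumed inputs.
def _example_auto_step_size(X, alpha, fit_intercept=True):
    max_squared_sum = row_norms(X, squared=True).max()
    alpha_scaled = float(alpha) / X.shape[0]
    return get_auto_step_size(max_squared_sum, alpha_scaled, 'log',
                              fit_intercept, n_samples=X.shape[0],
                              is_saga=True)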
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
tol : double, optional
        The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backend_managers.py | 10 | 11976 | """
`ToolManager`
Class that makes the bridge between user interaction (key press,
toolbar clicks, ..) and the actions in response to the user inputs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib.cbook as cbook
import matplotlib.widgets as widgets
from matplotlib.rcsetup import validate_stringlist
import matplotlib.backend_tools as tools
class ToolEvent(object):
"""Event for tool manipulation (add/remove)"""
def __init__(self, name, sender, tool, data=None):
self.name = name
self.sender = sender
self.tool = tool
self.data = data
class ToolTriggerEvent(ToolEvent):
"""Event to inform that a tool has been triggered"""
def __init__(self, name, sender, tool, canvasevent=None, data=None):
ToolEvent.__init__(self, name, sender, tool, data)
self.canvasevent = canvasevent
class ToolManagerMessageEvent(object):
"""
Event carrying messages from toolmanager
Messages usually get displayed to the user by the toolbar
"""
def __init__(self, name, sender, message):
self.name = name
self.sender = sender
self.message = message
class ToolManager(object):
"""
Helper class that groups all the user interactions for a FigureManager
Attributes
----------
manager: `FigureManager`
keypresslock: `widgets.LockDraw`
`LockDraw` object to know if the `canvas` key_press_event is locked
messagelock: `widgets.LockDraw`
`LockDraw` object to know if the message is available to write
"""
def __init__(self, canvas):
warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
'experimental for now, the API will likely change in ' +
'version 2.1 and perhaps the rcParam as well')
self.canvas = canvas
self._key_press_handler_id = self.canvas.mpl_connect(
'key_press_event', self._key_press)
self._tools = {}
self._keys = {}
self._toggled = {}
self._callbacks = cbook.CallbackRegistry()
# to process keypress event
self.keypresslock = widgets.LockDraw()
self.messagelock = widgets.LockDraw()
def toolmanager_connect(self, s, func):
"""
Connect event with string *s* to *func*.
Parameters
----------
s : String
Name of the event
The following events are recognized
- 'tool_message_event'
- 'tool_removed_event'
- 'tool_added_event'
For every tool added a new event is created
            - 'tool_trigger_TOOLNAME'
Where TOOLNAME is the id of the tool.
func : function
Function to be called with signature
def func(event)
"""
return self._callbacks.connect(s, func)
def toolmanager_disconnect(self, cid):
"""
Disconnect callback id *cid*
Example usage::
cid = toolmanager.toolmanager_connect('tool_trigger_zoom',
on_press)
#...later
toolmanager.toolmanager_disconnect(cid)
"""
return self._callbacks.disconnect(cid)
def message_event(self, message, sender=None):
""" Emit a `ToolManagerMessageEvent`"""
if sender is None:
sender = self
s = 'tool_message_event'
event = ToolManagerMessageEvent(s, sender, message)
self._callbacks.process(s, event)
@property
def active_toggle(self):
"""Currently toggled tools"""
return self._toggled
def get_tool_keymap(self, name):
"""
Get the keymap associated with the specified tool
Parameters
----------
name : string
Name of the Tool
Returns
-------
list : list of keys associated with the Tool
"""
keys = [k for k, i in six.iteritems(self._keys) if i == name]
return keys
def _remove_keys(self, name):
for k in self.get_tool_keymap(name):
del self._keys[k]
def update_keymap(self, name, *keys):
"""
Set the keymap to associate with the specified tool
Parameters
----------
name : string
Name of the Tool
keys : keys to associate with the Tool
"""
if name not in self._tools:
raise KeyError('%s not in Tools' % name)
self._remove_keys(name)
for key in keys:
for k in validate_stringlist(key):
if k in self._keys:
warnings.warn('Key %s changed from %s to %s' %
(k, self._keys[k], name))
self._keys[k] = name
def remove_tool(self, name):
"""
Remove tool from `ToolManager`
Parameters
----------
name : string
Name of the Tool
"""
tool = self.get_tool(name)
tool.destroy()
# If is a toggle tool and toggled, untoggle
if getattr(tool, 'toggled', False):
self.trigger_tool(tool, 'toolmanager')
self._remove_keys(name)
s = 'tool_removed_event'
event = ToolEvent(s, self, tool)
self._callbacks.process(s, event)
del self._tools[name]
def add_tool(self, name, tool, *args, **kwargs):
"""
Add *tool* to `ToolManager`
If successful adds a new event `tool_trigger_name` where **name** is
        the **name** of the tool; this event is fired every time
the tool is triggered.
Parameters
----------
name : str
Name of the tool, treated as the ID, has to be unique
tool : class_like, i.e. str or type
            Reference to find the class of the Tool to be added.
Notes
-----
args and kwargs get passed directly to the tools constructor.
See Also
--------
matplotlib.backend_tools.ToolBase : The base class for tools.
"""
tool_cls = self._get_cls_to_instantiate(tool)
if not tool_cls:
raise ValueError('Impossible to find class for %s' % str(tool))
if name in self._tools:
warnings.warn('A "Tool class" with the same name already exists, '
'not added')
return self._tools[name]
tool_obj = tool_cls(self, name, *args, **kwargs)
self._tools[name] = tool_obj
if tool_cls.default_keymap is not None:
self.update_keymap(name, tool_cls.default_keymap)
# For toggle tools init the radio_group in self._toggled
if isinstance(tool_obj, tools.ToolToggleBase):
# None group is not mutually exclusive, a set is used to keep track
# of all toggled tools in this group
if tool_obj.radio_group is None:
self._toggled.setdefault(None, set())
else:
self._toggled.setdefault(tool_obj.radio_group, None)
self._tool_added_event(tool_obj)
return tool_obj
def _tool_added_event(self, tool):
s = 'tool_added_event'
event = ToolEvent(s, self, tool)
self._callbacks.process(s, event)
def _handle_toggle(self, tool, sender, canvasevent, data):
"""
        Toggle a tool; any other toggled tool in the same radio group is untoggled first
Called from trigger_tool
Parameters
----------
tool: Tool object
sender: object
Object that wishes to trigger the tool
canvasevent : Event
Original Canvas event or None
data : Object
Extra data to pass to the tool when triggering
"""
radio_group = tool.radio_group
# radio_group None is not mutually exclusive
# just keep track of toggled tools in this group
if radio_group is None:
if tool.toggled:
self._toggled[None].remove(tool.name)
else:
self._toggled[None].add(tool.name)
return
# If the tool already has a toggled state, untoggle it
if self._toggled[radio_group] == tool.name:
toggled = None
# If no tool was toggled in the radio_group
# toggle it
elif self._toggled[radio_group] is None:
toggled = tool.name
# Other tool in the radio_group is toggled
else:
# Untoggle previously toggled tool
self.trigger_tool(self._toggled[radio_group],
self,
canvasevent,
data)
toggled = tool.name
# Keep track of the toggled tool in the radio_group
self._toggled[radio_group] = toggled
def _get_cls_to_instantiate(self, callback_class):
# Find the class that corresponds to the tool
if isinstance(callback_class, six.string_types):
# FIXME: make more complete searching structure
if callback_class in globals():
callback_class = globals()[callback_class]
else:
mod = 'backend_tools'
current_module = __import__(mod,
globals(), locals(), [mod], 1)
callback_class = getattr(current_module, callback_class, False)
if callable(callback_class):
return callback_class
else:
return None
def trigger_tool(self, name, sender=None, canvasevent=None,
data=None):
"""
Trigger a tool and emit the tool_trigger_[name] event
Parameters
----------
name : string
Name of the tool
sender: object
Object that wishes to trigger the tool
canvasevent : Event
Original Canvas event or None
data : Object
Extra data to pass to the tool when triggering
"""
tool = self.get_tool(name)
if tool is None:
return
if sender is None:
sender = self
self._trigger_tool(name, sender, canvasevent, data)
s = 'tool_trigger_%s' % name
event = ToolTriggerEvent(s, sender, tool, canvasevent, data)
self._callbacks.process(s, event)
def _trigger_tool(self, name, sender=None, canvasevent=None, data=None):
"""
Trigger on a tool
Method to actually trigger the tool
"""
tool = self.get_tool(name)
if isinstance(tool, tools.ToolToggleBase):
self._handle_toggle(tool, sender, canvasevent, data)
# Important!!!
# This is where the Tool object gets triggered
tool.trigger(sender, canvasevent, data)
def _key_press(self, event):
if event.key is None or self.keypresslock.locked():
return
name = self._keys.get(event.key, None)
if name is None:
return
self.trigger_tool(name, canvasevent=event)
@property
def tools(self):
"""Return the tools controlled by `ToolManager`"""
return self._tools
def get_tool(self, name, warn=True):
"""
Return the tool object, also accepts the actual tool for convenience
Parameters
----------
name : str, ToolBase
Name of the tool, or the tool itself
warn : bool, optional
If this method should give warnings.
"""
if isinstance(name, tools.ToolBase) and name.name in self._tools:
return name
if name not in self._tools:
if warn:
warnings.warn("ToolManager does not control tool %s" % name)
return None
return self._tools[name]
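# Illustrative usage sketch (for exposition only): with an interactive backend
# configured for the experimental toolmanager (rcParams['toolbar'] set to
# 'toolmanager'), a figure manager is assumed to expose a `toolmanager`
# attribute that can be used like this.
def _example_rebind_zoom_key(figure_manager):
    toolmanager = figure_manager.toolmanager
    toolmanager.update_keymap('zoom', 'z')  # rebind the zoom tool to the 'z' key
    return toolmanager.get_tool_keymap('zoom')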
| bsd-3-clause |
wscullin/spack | var/spack/repos/builtin/packages/py-pandas/package.py | 3 | 2476 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPandas(PythonPackage):
"""pandas is a Python package providing fast, flexible, and expressive
data structures designed to make working with relational or
labeled data both easy and intuitive. It aims to be the
fundamental high-level building block for doing practical, real
world data analysis in Python. Additionally, it has the broader
goal of becoming the most powerful and flexible open source data
analysis / manipulation tool available in any language.
"""
homepage = "http://pandas.pydata.org/"
url = "https://pypi.io/packages/source/p/pandas/pandas-0.19.0.tar.gz"
version('0.19.0', 'bc9bb7188e510b5d44fbdd249698a2c3')
version('0.18.0', 'f143762cd7a59815e348adf4308d2cf6')
version('0.16.1', 'fac4f25748f9610a3e00e765474bdea8')
version('0.16.0', 'bfe311f05dc0c351f8955fbd1e296e73')
depends_on('py-dateutil', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('py-pytz', type=('build', 'run'))
depends_on('py-numexpr', type=('build', 'run'))
depends_on('py-bottleneck', type=('build', 'run'))
| lgpl-2.1 |
prabhamatta/Analyzing-Open-Data | notebooks/Day_19_B_HackFSM.py | 2 | 11257 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# HackFSM
#
# Relationship to other public APIs based on Solr?
#
# * http://www.hathitrust.org/htrc/solr-api
# * http://api.plos.org/solr/search-fields/
#
# <markdowncell>
# Documentation:
#
# http://digitalhumanities.berkeley.edu/hackfsm/api/detail
# <codecell>
from settings import (HACKFSM_ID, HACKFSM_KEY, HACKFSM_BASEURL)
from itertools import islice
import logging
import requests
import json
import urllib
import urlparse
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
logging.basicConfig(filename='Experiment_20140325_HackFSM.log',level=logging.WARNING)
logger=logging.getLogger()
# <codecell>
def query(q, fl="id"):
url = "{base_url}?".format(base_url=HACKFSM_BASEURL) + \
urllib.urlencode({'q':q,
'fl':fl,
'wt':'json',
'app_id':HACKFSM_ID,
'app_key':HACKFSM_KEY})
r = requests.get(url)
return r.json()
# <codecell>
result = query(q="fsmTitle:Savio")['response']
result
# <headingcell level=1>
# Paging through results
# <codecell>
# try again
# http://stackoverflow.com/a/5724453/7782
# http://excess.org/article/2013/02/itergen1/
class my_g(object):
def __init__(self,max_count):
self._remaining = range(max_count)
self._len = max_count
def __iter__(self):
return self
def __len__(self):
return self._len
def next(self):
if not self._remaining:
raise StopIteration
return self._remaining.pop(0)
g=my_g(10)
print len(g)
list(g)
# <codecell>
class FSMException(Exception):
    """Exception raised when the HackFSM API returns a non-zero status."""
class FSM(object):
def __init__(self, q, fl="id", start=0, rows=30,
base_url=HACKFSM_BASEURL, app_id=HACKFSM_ID, app_key=HACKFSM_KEY):
self.q = q
self.fl = fl
self.start = start
self.rows = rows
self.base_url = base_url
self.app_id = app_id
self.app_key = app_key
# get first page and numfound
self.cursor = start
# get the first page
result = self._get_page(q, fl, self.cursor, self.rows)
self.numfound = result['response']['numFound']
def _check_status(self,result):
"""throw exception if non-zero status"""
if result['responseHeader']['status'] != 0:
raise FSMException("status: " + str(result['responseHeader']['status']))
def _get_page(self, q, fl, start, rows):
result = self._call_api(q, fl, start, rows)
# update current page
self.page = result['response']['docs']
self.page_len = len(self.page)
return result
def _call_api(self, q, fl, start, rows):
url = "{base_url}?".format(base_url=self.base_url) + \
urllib.urlencode({'q':q,
'fl':fl,
'wt':'json',
'start':start,
'row':rows,
'app_id':self.app_id,
'app_key':self.app_key})
result = requests.get(url).json()
self._check_status(result)
# check whether we're getting fewer records than expected
if len(result['response']['docs']) < rows:
# are we at the end of the results
if start + len(result['response']['docs']) != self.numfound:
logger.warning("url:{url}, numfound:{numfound}, start+len{start_plus_len}".format(url=url,
numfound=self.numfound,
start_plus_len=start + len(result['response']['docs'])))
return result
def __iter__(self):
return self
def __len__(self):
return self.numfound
def next(self):
if not self.page:
# retrieve next page and check whether there's anything left
self.cursor += self.page_len
result = self._get_page(self.q, self.fl, self.cursor, self.rows)
if self.page_len == 0:
raise StopIteration
return self.page.pop(0)
# <codecell>
fsm = FSM("-fsmTeiUrl:[* TO *]", fl="id,fsmTitle,fsmImageUrl,fsmDateCreated")
# <codecell>
len(fsm)
# <codecell>
results = list(islice(fsm,None))
results[:10]
# <codecell>
df = DataFrame(results)
# <codecell>
len(df)
# <codecell>
df.fsmImageUrl
# <codecell>
from IPython.display import HTML
from jinja2 import Template
CSS = """
<style>
.wrap img {
margin-left: 0px;
margin-right: 0px;
display: inline-block;
width: 150px;
}
.wrap {
/* Prevent vertical gaps */
line-height: 0;
-webkit-column-count: 5;
-webkit-column-gap: 0px;
-moz-column-count: 5;
-moz-column-gap: 0px;
column-count: 5;
column-gap: 0px;
}
.wrap img {
/* Just in case there are inline attributes */
width: 100% !important;
height: auto !important;
}
</style>
"""
IMAGES_TEMPLATE = CSS + """
<div class="wrap">
{% for item in items %}<img title="{{item.fsmTitle.0}}" src="{{item.fsmImageUrl.0}}"/>{% endfor %}
</div>
"""
template = Template(IMAGES_TEMPLATE)
HTML(template.render(items=results[:10]))
# <markdowncell>
# # DISTINGUISHING IMAGES FROM DOCUMENTS
#
# To programmatically differentiate records that describe images from records that describe TEI-encoded XML documents, the API permits queries that exclude records with NULL values in the "unwanted" Url field.
#
# That is, to retrieve TEI documents only, one would query for null values in the `fsmImageUrl` field. To retrieve images only, one would query for null values in the `fsmTeiUrl` field.
#
# NOTE: Please observe the hyphen prepended to the field names in the examples below. The hyphen (minus sign) functions here as a NOT operator.
#
# Example that selects for TEI encoded XML documents by excluding null values of `fsmImageUrl`:
#
# https://<BASE URL>/solr/fsm/select?q=-fsmImageUrl:[* TO *]&wt=json&indent=true&app_id=abcdefgh&app_key=12345678901234567890123456789012
#
# Example that selects for images by excluding null values of fsmTeiUrl:
#
# https://<BASE URL>/solr/fsm/select?q=-fsmTeiUrl:[* TO *]&wt=json&indent=true&app_id=abcdefgh&app_key=12345678901234567890123456789012
# <codecell>
# TEI-encoded docs
len(FSM("-fsmImageUrl:[* TO *]"))
# <codecell>
# images
len(FSM("-fsmTeiUrl:[* TO *]", fl="id,fsmImageUrl"))
# <headingcell level=1>
# Studying the API parameters
# <codecell>
from lxml.html import parse, fromstring
from collections import OrderedDict
api_docs_url = "http://digitalhumanities.berkeley.edu/hackfsm/api/detail"
r = requests.get(api_docs_url).content
doc = fromstring(r)
# <codecell>
rows = doc.xpath('//div[@id="content"]/article/div/div/div/table[1]//tr')
headers = [col.text_content().strip() for col in rows[0].findall('td')]
headers
# <codecell>
fields = []
for row in rows[1:]:
field = [col.text_content().strip() for col in row.findall('td')]
fields.append(field)
fsmfields = OrderedDict(fields)
fsmfields.keys()
# <headingcell level=1>
# Study all the records
# <codecell>
fsm = FSM(q="*",fl=",".join(fsmfields.keys()))
# <codecell>
len(fsm)
# <codecell>
df = DataFrame(list(fsm))
# <codecell>
len(df)
# <codecell>
df.head()
# <codecell>
# TEI URIs
len(list(df[~df.fsmTeiUrl.isnull()].fsmTeiUrl.apply(lambda a: a[0])))
# <codecell>
# null dates
len(df[df.fsmDateCreated.isnull()])
# <codecell>
# non-null image URLs
len(df[~df.fsmImageUrl.isnull()])
# <codecell>
df[~df.fsmImageUrl.isnull()].id
# <codecell>
# distribution of number of image URLs
df[~df.fsmImageUrl.isnull()].fsmImageUrl.apply(len).value_counts()
# <codecell>
# let's crawl for images
results_images = list(FSM("-fsmTeiUrl:[* TO *]", fl=",".join(fsmfields.keys())))
# <codecell>
len(results_images)
# <codecell>
df_images=DataFrame(results_images)
# <codecell>
df_images[df_images.fsmImageUrl.isnull()]
# <codecell>
# would be interesting to see sizes of images and whether we can get at thumbnails
df_images.fsmImageUrl
# <markdowncell>
# http://content.cdlib.org/ark:/13030/tf1z09n5r1/thumbnail ->
# http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_a.gif
#
# 
#
# http://content.cdlib.org/ark:/13030/tf1z09n5r1/hi-res.jpg ->
# http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_c.jpg
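# <markdowncell>
#
# Assuming the redirect pattern above holds for other records, a small helper (hypothetical, not part of the API wrapper) could resolve an ark URL to its underlying asset by following the redirect:
# <codecell>
def resolve_asset(ark_url, variant="thumbnail"):
    """Follow the content.cdlib.org redirect for a variant such as 'thumbnail' or 'hi-res.jpg'."""
    r = requests.head(ark_url.rstrip("/") + "/" + variant, allow_redirects=True)
    return r.url
# resolve_asset("http://content.cdlib.org/ark:/13030/tf1z09n5r1")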
# <codecell>
urlparse.urlparse("http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_c.jpg").netloc
# <codecell>
df_images.fsmImageUrl
# <codecell>
# calculate hostnames for all image urls
# might be possible to do this all with pandas
netlocs = list(df_images.fsmImageUrl.dropna().apply(lambda urls: set([urlparse.urlparse(url).netloc for url in urls])))
reduce(lambda x,y: x | y, netlocs, set())
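# <markdowncell>
#
# A sketch of how the same hostname calculation might be kept mostly in pandas (assuming each non-null `fsmImageUrl` entry is a list of URL strings):
# <codecell>
# one row per URL, then map each URL to its hostname and deduplicate
all_urls = df_images.fsmImageUrl.dropna().apply(pd.Series).stack()
set(all_urls.map(lambda url: urlparse.urlparse(url).netloc))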
# <codecell>
def len2(x):
try:
return len(x)
except:
return np.nan
df_images.fsmImageUrl.apply(len2) == 3
# <codecell>
df_images[df_images.fsmImageUrl.apply(len2) == 3].head()
# <markdowncell>
# 
# 
# 
# <codecell>
df_images[df_images.fsmImageUrl.apply(len2) == 4].ix[100].fsmImageUrl
# <codecell>
IMAGES_TEMPLATE = """
<div class="nowrap">
{% for item in items %}<img title="{{item}}" src="{{item}}"/>{% endfor %}
</div>
"""
template = Template(IMAGES_TEMPLATE)
HTML(template.render(items=df_images[df_images.fsmImageUrl.apply(len2) == 4].ix[100].fsmImageUrl ))
# <headingcell level=1>
# Dates
# <codecell>
len(df[~df.fsmDateCreated.isnull()])
# <codecell>
s = df[~df.fsmDateCreated.isnull()].fsmDateCreated.apply(len)==2 #.astype('datetime64[ns]')
# <codecell>
def first(x):
try:
return x[0]
except:
return np.nan
df['calc_date'] = pd.to_datetime(df.fsmDateCreated.apply(first), coerce=True)
# <codecell>
df[~df.calc_date.isnull()].sort_index(by='calc_date').calc_date
# <codecell>
pd.to_datetime(df.fsmDateCreated.dropna().apply(lambda s:s[0]).astype('str'), coerce=True).dropna()
# <codecell>
# http://stackoverflow.com/questions/17690738/in-pandas-how-do-i-convert-a-string-of-date-strings-to-datetime-objects-and-put
date_stngs = ('2008-12-20','2008-12-21','2008-12-22','2008-12-23','Nov. 9, 1964', 'junk')
pd.to_datetime(pd.Series(date_stngs),coerce=True)
# <headingcell level=1>
# Types of Resources
# <codecell>
def f(x):
try:
return set(x)
except:
return set()
reduce(lambda x,y: x | y, df.fsmTypeOfResource.apply(f), set())
# <codecell>
#related id
len(df.fsmRelatedIdentifier.dropna())
# <headingcell level=1>
# TEI documents
# <codecell>
df.fsmTeiUrl.dropna()
| apache-2.0 |
ominux/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 9 | 1511 | """
=====================================
Blind source separation using FastICA
=====================================
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 2 instruments playing simultaneously and 2 microphones
recording the mixed signals. ICA is used to recover the sources
i.e., what is played by each instrument.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn.decomposition import FastICA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 10, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
S = np.c_[s1, s2]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1], [0.5, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA()
S_ = ica.fit(X).transform(X) # Get the estimated sources
A_ = ica.get_mixing_matrix() # Get estimated mixing matrix
assert np.allclose(X, np.dot(S_, A_.T))
###############################################################################
# Plot results
pl.figure()
pl.subplot(3, 1, 1)
pl.plot(S)
pl.title('True Sources')
pl.subplot(3, 1, 2)
pl.plot(X)
pl.title('Observations (mixed signal)')
pl.subplot(3, 1, 3)
pl.plot(S_)
pl.title('ICA estimated sources')
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
pl.show()
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/_mathtext_data.py | 8 | 90024 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\widebar' : ('cmr10', 131),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
'\\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 44),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\#' : ('cmr10', 39),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
'\\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
r'\prime' : ('cmsy10', 73),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
'\\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
'\\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
'\\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
'\\updownarrow' : ('cmsy10', 94),
'\\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
r'?' : ('cmr10', 50),
r'!' : ('cmr10', 29),
r'&' : ('cmr10', 109)
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
'\\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
'\\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
'\\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
'\\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0o136),
r'\Omega' : ('psyr', 0o127),
r'\leftbracket' : ('psyr', 0o133),
r'\rightbracket' : ('psyr', 0o135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 0o50),
r'\prime' : ('psyr', 0o242),
r'\sharp' : ('psyr', 0o43),
r'\slash' : ('psyr', 0o57),
r'\Lamda' : ('psyr', 0o114),
r'\neg' : ('psyr', 0o330),
'\\Upsilon' : ('psyr', 0o241),
r'\rightbrace' : ('psyr', 0o175),
r'\rfloor' : ('psyr', 0o373),
r'\lambda' : ('psyr', 0o154),
r'\to' : ('psyr', 0o256),
r'\Xi' : ('psyr', 0o130),
r'\emptyset' : ('psyr', 0o306),
r'\lfloor' : ('psyr', 0o353),
r'\rightparen' : ('psyr', 0o51),
r'\rceil' : ('psyr', 0o371),
r'\ni' : ('psyr', 0o47),
r'\epsilon' : ('psyr', 0o145),
r'\Theta' : ('psyr', 0o121),
r'\langle' : ('psyr', 0o341),
r'\leftangle' : ('psyr', 0o341),
r'\rangle' : ('psyr', 0o361),
r'\rightangle' : ('psyr', 0o361),
r'\rbrace' : ('psyr', 0o175),
r'\circ' : ('psyr', 0o260),
r'\diamond' : ('psyr', 0o340),
r'\mu' : ('psyr', 0o155),
r'\mid' : ('psyr', 0o352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\#' : ('pncr8a', 35),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {
'uni24C8' : 9416,
'aring' : 229,
'uni22A0' : 8864,
'uni2292' : 8850,
'quotedblright' : 8221,
'uni03D2' : 978,
'uni2215' : 8725,
'uni03D0' : 976,
'V' : 86,
'dollar' : 36,
'uni301E' : 12318,
'uni03D5' : 981,
'four' : 52,
'uni25A0' : 9632,
'uni013C' : 316,
'uni013B' : 315,
'uni013E' : 318,
'Yacute' : 221,
'uni25DE' : 9694,
'uni013F' : 319,
'uni255A' : 9562,
'uni2606' : 9734,
'uni0180' : 384,
'uni22B7' : 8887,
'uni044F' : 1103,
'uni22B5' : 8885,
'uni22B4' : 8884,
'uni22AE' : 8878,
'uni22B2' : 8882,
'uni22B1' : 8881,
'uni22B0' : 8880,
'uni25CD' : 9677,
'uni03CE' : 974,
'uni03CD' : 973,
'uni03CC' : 972,
'uni03CB' : 971,
'uni03CA' : 970,
'uni22B8' : 8888,
'uni22C9' : 8905,
'uni0449' : 1097,
'uni20DD' : 8413,
'uni20DC' : 8412,
'uni20DB' : 8411,
'uni2231' : 8753,
'uni25CF' : 9679,
'uni306E' : 12398,
'uni03D1' : 977,
'uni01A1' : 417,
'uni20D7' : 8407,
'uni03D6' : 982,
'uni2233' : 8755,
'uni20D2' : 8402,
'uni20D1' : 8401,
'uni20D0' : 8400,
'P' : 80,
'uni22BE' : 8894,
'uni22BD' : 8893,
'uni22BC' : 8892,
'uni22BB' : 8891,
'underscore' : 95,
'uni03C8' : 968,
'uni03C7' : 967,
'uni0328' : 808,
'uni03C5' : 965,
'uni03C4' : 964,
'uni03C3' : 963,
'uni03C2' : 962,
'uni03C1' : 961,
'uni03C0' : 960,
'uni2010' : 8208,
'uni0130' : 304,
'uni0133' : 307,
'uni0132' : 306,
'uni0135' : 309,
'uni0134' : 308,
'uni0137' : 311,
'uni0136' : 310,
'uni0139' : 313,
'uni0138' : 312,
'uni2244' : 8772,
'uni229A' : 8858,
'uni2571' : 9585,
'uni0278' : 632,
'uni2239' : 8761,
'p' : 112,
'uni3019' : 12313,
'uni25CB' : 9675,
'uni03DB' : 987,
'uni03DC' : 988,
'uni03DA' : 986,
'uni03DF' : 991,
'uni03DD' : 989,
'uni013D' : 317,
'uni220A' : 8714,
'uni220C' : 8716,
'uni220B' : 8715,
'uni220E' : 8718,
'uni220D' : 8717,
'uni220F' : 8719,
'uni22CC' : 8908,
'Otilde' : 213,
'uni25E5' : 9701,
'uni2736' : 10038,
'perthousand' : 8240,
'zero' : 48,
'uni279B' : 10139,
'dotlessi' : 305,
'uni2279' : 8825,
'Scaron' : 352,
'zcaron' : 382,
'uni21D8' : 8664,
'egrave' : 232,
'uni0271' : 625,
'uni01AA' : 426,
'uni2332' : 9010,
'section' : 167,
'uni25E4' : 9700,
'Icircumflex' : 206,
'ntilde' : 241,
'uni041E' : 1054,
'ampersand' : 38,
'uni041C' : 1052,
'uni041A' : 1050,
'uni22AB' : 8875,
'uni21DB' : 8667,
'dotaccent' : 729,
'uni0416' : 1046,
'uni0417' : 1047,
'uni0414' : 1044,
'uni0415' : 1045,
'uni0412' : 1042,
'uni0413' : 1043,
'degree' : 176,
'uni0411' : 1041,
'K' : 75,
'uni25EB' : 9707,
'uni25EF' : 9711,
'uni0418' : 1048,
'uni0419' : 1049,
'uni2263' : 8803,
'uni226E' : 8814,
'uni2251' : 8785,
'uni02C8' : 712,
'uni2262' : 8802,
'acircumflex' : 226,
'uni22B3' : 8883,
'uni2261' : 8801,
'uni2394' : 9108,
'Aring' : 197,
'uni2260' : 8800,
'uni2254' : 8788,
'uni0436' : 1078,
'uni2267' : 8807,
'k' : 107,
'uni22C8' : 8904,
'uni226A' : 8810,
'uni231F' : 8991,
'smalltilde' : 732,
'uni2201' : 8705,
'uni2200' : 8704,
'uni2203' : 8707,
'uni02BD' : 701,
'uni2205' : 8709,
'uni2204' : 8708,
'Agrave' : 192,
'uni2206' : 8710,
'uni2209' : 8713,
'uni2208' : 8712,
'uni226D' : 8813,
'uni2264' : 8804,
'uni263D' : 9789,
'uni2258' : 8792,
'uni02D3' : 723,
'uni02D2' : 722,
'uni02D1' : 721,
'uni02D0' : 720,
'uni25E1' : 9697,
'divide' : 247,
'uni02D5' : 725,
'uni02D4' : 724,
'ocircumflex' : 244,
'uni2524' : 9508,
'uni043A' : 1082,
'uni24CC' : 9420,
'asciitilde' : 126,
'uni22B9' : 8889,
'uni24D2' : 9426,
'uni211E' : 8478,
'uni211D' : 8477,
'uni24DD' : 9437,
'uni211A' : 8474,
'uni211C' : 8476,
'uni211B' : 8475,
'uni25C6' : 9670,
'uni017F' : 383,
'uni017A' : 378,
'uni017C' : 380,
'uni017B' : 379,
'uni0346' : 838,
'uni22F1' : 8945,
'uni22F0' : 8944,
'two' : 50,
'uni2298' : 8856,
'uni24D1' : 9425,
'E' : 69,
'uni025D' : 605,
'scaron' : 353,
'uni2322' : 8994,
'uni25E3' : 9699,
'uni22BF' : 8895,
'F' : 70,
'uni0440' : 1088,
'uni255E' : 9566,
'uni22BA' : 8890,
'uni0175' : 373,
'uni0174' : 372,
'uni0177' : 375,
'uni0176' : 374,
'bracketleft' : 91,
'uni0170' : 368,
'uni0173' : 371,
'uni0172' : 370,
'asciicircum' : 94,
'uni0179' : 377,
'uni2590' : 9616,
'uni25E2' : 9698,
'uni2119' : 8473,
'uni2118' : 8472,
'uni25CC' : 9676,
'f' : 102,
'ordmasculine' : 186,
'uni229B' : 8859,
'uni22A1' : 8865,
'uni2111' : 8465,
'uni2110' : 8464,
'uni2113' : 8467,
'uni2112' : 8466,
'mu' : 181,
'uni2281' : 8833,
'paragraph' : 182,
'nine' : 57,
'uni25EC' : 9708,
'v' : 118,
'uni040C' : 1036,
'uni0113' : 275,
'uni22D0' : 8912,
'uni21CC' : 8652,
'uni21CB' : 8651,
'uni21CA' : 8650,
'uni22A5' : 8869,
'uni21CF' : 8655,
'uni21CE' : 8654,
'uni21CD' : 8653,
'guilsinglleft' : 8249,
'backslash' : 92,
'uni2284' : 8836,
'uni224E' : 8782,
'uni224D' : 8781,
'uni224F' : 8783,
'uni224A' : 8778,
'uni2287' : 8839,
'uni224C' : 8780,
'uni224B' : 8779,
'uni21BD' : 8637,
'uni2286' : 8838,
'uni030F' : 783,
'uni030D' : 781,
'uni030E' : 782,
'uni030B' : 779,
'uni030C' : 780,
'uni030A' : 778,
'uni026E' : 622,
'uni026D' : 621,
'six' : 54,
'uni026A' : 618,
'uni026C' : 620,
'uni25C1' : 9665,
'uni20D6' : 8406,
'uni045B' : 1115,
'uni045C' : 1116,
'uni256B' : 9579,
'uni045A' : 1114,
'uni045F' : 1119,
'uni045E' : 1118,
'A' : 65,
'uni2569' : 9577,
'uni0458' : 1112,
'uni0459' : 1113,
'uni0452' : 1106,
'uni0453' : 1107,
'uni2562' : 9570,
'uni0451' : 1105,
'uni0456' : 1110,
'uni0457' : 1111,
'uni0454' : 1108,
'uni0455' : 1109,
'icircumflex' : 238,
'uni0307' : 775,
'uni0304' : 772,
'uni0305' : 773,
'uni0269' : 617,
'uni0268' : 616,
'uni0300' : 768,
'uni0301' : 769,
'uni0265' : 613,
'uni0264' : 612,
'uni0267' : 615,
'uni0266' : 614,
'uni0261' : 609,
'uni0260' : 608,
'uni0263' : 611,
'uni0262' : 610,
'a' : 97,
'uni2207' : 8711,
'uni2247' : 8775,
'uni2246' : 8774,
'uni2241' : 8769,
'uni2240' : 8768,
'uni2243' : 8771,
'uni2242' : 8770,
'uni2312' : 8978,
'ogonek' : 731,
'uni2249' : 8777,
'uni2248' : 8776,
'uni3030' : 12336,
'q' : 113,
'uni21C2' : 8642,
'uni21C1' : 8641,
'uni21C0' : 8640,
'uni21C7' : 8647,
'uni21C6' : 8646,
'uni21C5' : 8645,
'uni21C4' : 8644,
'uni225F' : 8799,
'uni212C' : 8492,
'uni21C8' : 8648,
'uni2467' : 9319,
'oacute' : 243,
'uni028F' : 655,
'uni028E' : 654,
'uni026F' : 623,
'uni028C' : 652,
'uni028B' : 651,
'uni028A' : 650,
'uni2510' : 9488,
'ograve' : 242,
'edieresis' : 235,
'uni22CE' : 8910,
'uni22CF' : 8911,
'uni219F' : 8607,
'comma' : 44,
'uni22CA' : 8906,
'uni0429' : 1065,
'uni03C6' : 966,
'uni0427' : 1063,
'uni0426' : 1062,
'uni0425' : 1061,
'uni0424' : 1060,
'uni0423' : 1059,
'uni0422' : 1058,
'uni0421' : 1057,
'uni0420' : 1056,
'uni2465' : 9317,
'uni24D0' : 9424,
'uni2464' : 9316,
'uni0430' : 1072,
'otilde' : 245,
'uni2661' : 9825,
'uni24D6' : 9430,
'uni2466' : 9318,
'uni24D5' : 9429,
'uni219A' : 8602,
'uni2518' : 9496,
'uni22B6' : 8886,
'uni2461' : 9313,
'uni24D4' : 9428,
'uni2460' : 9312,
'uni24EA' : 9450,
'guillemotright' : 187,
'ecircumflex' : 234,
'greater' : 62,
'uni2011' : 8209,
'uacute' : 250,
'uni2462' : 9314,
'L' : 76,
'bullet' : 8226,
'uni02A4' : 676,
'uni02A7' : 679,
'cedilla' : 184,
'uni02A2' : 674,
'uni2015' : 8213,
'uni22C4' : 8900,
'uni22C5' : 8901,
'uni22AD' : 8877,
'uni22C7' : 8903,
'uni22C0' : 8896,
'uni2016' : 8214,
'uni22C2' : 8898,
'uni22C3' : 8899,
'uni24CF' : 9423,
'uni042F' : 1071,
'uni042E' : 1070,
'uni042D' : 1069,
'ydieresis' : 255,
'l' : 108,
'logicalnot' : 172,
'uni24CA' : 9418,
'uni0287' : 647,
'uni0286' : 646,
'uni0285' : 645,
'uni0284' : 644,
'uni0283' : 643,
'uni0282' : 642,
'uni0281' : 641,
'uni027C' : 636,
'uni2664' : 9828,
'exclamdown' : 161,
'uni25C4' : 9668,
'uni0289' : 649,
'uni0288' : 648,
'uni039A' : 922,
'endash' : 8211,
'uni2640' : 9792,
'uni20E4' : 8420,
'uni0473' : 1139,
'uni20E1' : 8417,
'uni2642' : 9794,
'uni03B8' : 952,
'uni03B9' : 953,
'agrave' : 224,
'uni03B4' : 948,
'uni03B5' : 949,
'uni03B6' : 950,
'uni03B7' : 951,
'uni03B0' : 944,
'uni03B1' : 945,
'uni03B2' : 946,
'uni03B3' : 947,
'uni2555' : 9557,
'Adieresis' : 196,
'germandbls' : 223,
'Odieresis' : 214,
'space' : 32,
'uni0126' : 294,
'uni0127' : 295,
'uni0124' : 292,
'uni0125' : 293,
'uni0122' : 290,
'uni0123' : 291,
'uni0120' : 288,
'uni0121' : 289,
'quoteright' : 8217,
'uni2560' : 9568,
'uni2556' : 9558,
'ucircumflex' : 251,
'uni2561' : 9569,
'uni2551' : 9553,
'uni25B2' : 9650,
'uni2550' : 9552,
'uni2563' : 9571,
'uni2553' : 9555,
'G' : 71,
'uni2564' : 9572,
'uni2552' : 9554,
'quoteleft' : 8216,
'uni2565' : 9573,
'uni2572' : 9586,
'uni2568' : 9576,
'uni2566' : 9574,
'W' : 87,
'uni214A' : 8522,
'uni012F' : 303,
'uni012D' : 301,
'uni012E' : 302,
'uni012B' : 299,
'uni012C' : 300,
'uni255C' : 9564,
'uni012A' : 298,
'uni2289' : 8841,
'Q' : 81,
'uni2320' : 8992,
'uni2321' : 8993,
'g' : 103,
'uni03BD' : 957,
'uni03BE' : 958,
'uni03BF' : 959,
'uni2282' : 8834,
'uni2285' : 8837,
'uni03BA' : 954,
'uni03BB' : 955,
'uni03BC' : 956,
'uni2128' : 8488,
'uni25B7' : 9655,
'w' : 119,
'uni0302' : 770,
'uni03DE' : 990,
'uni25DA' : 9690,
'uni0303' : 771,
'uni0463' : 1123,
'uni0462' : 1122,
'uni3018' : 12312,
'uni2514' : 9492,
'question' : 63,
'uni25B3' : 9651,
'uni24E1' : 9441,
'one' : 49,
'uni200A' : 8202,
'uni2278' : 8824,
'ring' : 730,
'uni0195' : 405,
'figuredash' : 8210,
'uni22EC' : 8940,
'uni0339' : 825,
'uni0338' : 824,
'uni0337' : 823,
'uni0336' : 822,
'uni0335' : 821,
'uni0333' : 819,
'uni0332' : 818,
'uni0331' : 817,
'uni0330' : 816,
'uni01C1' : 449,
'uni01C0' : 448,
'uni01C3' : 451,
'uni01C2' : 450,
'uni2353' : 9043,
'uni0308' : 776,
'uni2218' : 8728,
'uni2219' : 8729,
'uni2216' : 8726,
'uni2217' : 8727,
'uni2214' : 8724,
'uni0309' : 777,
'uni2609' : 9737,
'uni2213' : 8723,
'uni2210' : 8720,
'uni2211' : 8721,
'uni2245' : 8773,
'B' : 66,
'uni25D6' : 9686,
'iacute' : 237,
'uni02E6' : 742,
'uni02E7' : 743,
'uni02E8' : 744,
'uni02E9' : 745,
'uni221D' : 8733,
'uni221E' : 8734,
'Ydieresis' : 376,
'uni221C' : 8732,
'uni22D7' : 8919,
'uni221A' : 8730,
'R' : 82,
'uni24DC' : 9436,
'uni033F' : 831,
'uni033E' : 830,
'uni033C' : 828,
'uni033B' : 827,
'uni033A' : 826,
'b' : 98,
'uni228A' : 8842,
'uni22DB' : 8923,
'uni2554' : 9556,
'uni046B' : 1131,
'uni046A' : 1130,
'r' : 114,
'uni24DB' : 9435,
'Ccedilla' : 199,
'minus' : 8722,
'uni24DA' : 9434,
'uni03F0' : 1008,
'uni03F1' : 1009,
'uni20AC' : 8364,
'uni2276' : 8822,
'uni24C0' : 9408,
'uni0162' : 354,
'uni0163' : 355,
'uni011E' : 286,
'uni011D' : 285,
'uni011C' : 284,
'uni011B' : 283,
'uni0164' : 356,
'uni0165' : 357,
'Lslash' : 321,
'uni0168' : 360,
'uni0169' : 361,
'uni25C9' : 9673,
'uni02E5' : 741,
'uni21C3' : 8643,
'uni24C4' : 9412,
'uni24E2' : 9442,
'uni2277' : 8823,
'uni013A' : 314,
'uni2102' : 8450,
'Uacute' : 218,
'uni2317' : 8983,
'uni2107' : 8455,
'uni221F' : 8735,
'yacute' : 253,
'uni3012' : 12306,
'Ucircumflex' : 219,
'uni015D' : 349,
'quotedbl' : 34,
'uni25D9' : 9689,
'uni2280' : 8832,
'uni22AF' : 8879,
'onehalf' : 189,
'uni221B' : 8731,
'Thorn' : 222,
'uni2226' : 8742,
'M' : 77,
'uni25BA' : 9658,
'uni2463' : 9315,
'uni2336' : 9014,
'eight' : 56,
'uni2236' : 8758,
'multiply' : 215,
'uni210C' : 8460,
'uni210A' : 8458,
'uni21C9' : 8649,
'grave' : 96,
'uni210E' : 8462,
'uni0117' : 279,
'uni016C' : 364,
'uni0115' : 277,
'uni016A' : 362,
'uni016F' : 367,
'uni0112' : 274,
'uni016D' : 365,
'uni016E' : 366,
'Ocircumflex' : 212,
'uni2305' : 8965,
'm' : 109,
'uni24DF' : 9439,
'uni0119' : 281,
'uni0118' : 280,
'uni20A3' : 8355,
'uni20A4' : 8356,
'uni20A7' : 8359,
'uni2288' : 8840,
'uni24C3' : 9411,
'uni251C' : 9500,
'uni228D' : 8845,
'uni222F' : 8751,
'uni222E' : 8750,
'uni222D' : 8749,
'uni222C' : 8748,
'uni222B' : 8747,
'uni222A' : 8746,
'uni255B' : 9563,
'Ugrave' : 217,
'uni24DE' : 9438,
'guilsinglright' : 8250,
'uni250A' : 9482,
'Ntilde' : 209,
'uni0279' : 633,
'questiondown' : 191,
'uni256C' : 9580,
'Atilde' : 195,
'uni0272' : 626,
'uni0273' : 627,
'uni0270' : 624,
'ccedilla' : 231,
'uni0276' : 630,
'uni0277' : 631,
'uni0274' : 628,
'uni0275' : 629,
'uni2252' : 8786,
'uni041F' : 1055,
'uni2250' : 8784,
'Z' : 90,
'uni2256' : 8790,
'uni2257' : 8791,
'copyright' : 169,
'uni2255' : 8789,
'uni043D' : 1085,
'uni043E' : 1086,
'uni043F' : 1087,
'yen' : 165,
'uni041D' : 1053,
'uni043B' : 1083,
'uni043C' : 1084,
'uni21B0' : 8624,
'uni21B1' : 8625,
'uni21B2' : 8626,
'uni21B3' : 8627,
'uni21B4' : 8628,
'uni21B5' : 8629,
'uni21B6' : 8630,
'uni21B7' : 8631,
'uni21B8' : 8632,
'Eacute' : 201,
'uni2311' : 8977,
'uni2310' : 8976,
'uni228F' : 8847,
'uni25DB' : 9691,
'uni21BA' : 8634,
'uni21BB' : 8635,
'uni21BC' : 8636,
'uni2017' : 8215,
'uni21BE' : 8638,
'uni21BF' : 8639,
'uni231C' : 8988,
'H' : 72,
'uni0293' : 659,
'uni2202' : 8706,
'uni22A4' : 8868,
'uni231E' : 8990,
'uni2232' : 8754,
'uni225B' : 8795,
'uni225C' : 8796,
'uni24D9' : 9433,
'uni225A' : 8794,
'uni0438' : 1080,
'uni0439' : 1081,
'uni225D' : 8797,
'uni225E' : 8798,
'uni0434' : 1076,
'X' : 88,
'uni007F' : 127,
'uni0437' : 1079,
'Idieresis' : 207,
'uni0431' : 1073,
'uni0432' : 1074,
'uni0433' : 1075,
'uni22AC' : 8876,
'uni22CD' : 8909,
'uni25A3' : 9635,
'bar' : 124,
'uni24BB' : 9403,
'uni037E' : 894,
'uni027B' : 635,
'h' : 104,
'uni027A' : 634,
'uni027F' : 639,
'uni027D' : 637,
'uni027E' : 638,
'uni2227' : 8743,
'uni2004' : 8196,
'uni2225' : 8741,
'uni2224' : 8740,
'uni2223' : 8739,
'uni2222' : 8738,
'uni2221' : 8737,
'uni2220' : 8736,
'x' : 120,
'uni2323' : 8995,
'uni2559' : 9561,
'uni2558' : 9560,
'uni2229' : 8745,
'uni2228' : 8744,
'udieresis' : 252,
'uni029D' : 669,
'ordfeminine' : 170,
'uni22CB' : 8907,
'uni233D' : 9021,
'uni0428' : 1064,
'uni24C6' : 9414,
'uni22DD' : 8925,
'uni24C7' : 9415,
'uni015C' : 348,
'uni015B' : 347,
'uni015A' : 346,
'uni22AA' : 8874,
'uni015F' : 351,
'uni015E' : 350,
'braceleft' : 123,
'uni24C5' : 9413,
'uni0410' : 1040,
'uni03AA' : 938,
'uni24C2' : 9410,
'uni03AC' : 940,
'uni03AB' : 939,
'macron' : 175,
'uni03AD' : 941,
'uni03AF' : 943,
'uni0294' : 660,
'uni0295' : 661,
'uni0296' : 662,
'uni0297' : 663,
'uni0290' : 656,
'uni0291' : 657,
'uni0292' : 658,
'atilde' : 227,
'Acircumflex' : 194,
'uni2370' : 9072,
'uni24C1' : 9409,
'uni0298' : 664,
'uni0299' : 665,
'Oslash' : 216,
'uni029E' : 670,
'C' : 67,
'quotedblleft' : 8220,
'uni029B' : 667,
'uni029C' : 668,
'uni03A9' : 937,
'uni03A8' : 936,
'S' : 83,
'uni24C9' : 9417,
'uni03A1' : 929,
'uni03A0' : 928,
'exclam' : 33,
'uni03A5' : 933,
'uni03A4' : 932,
'uni03A7' : 935,
'Zcaron' : 381,
'uni2133' : 8499,
'uni2132' : 8498,
'uni0159' : 345,
'uni0158' : 344,
'uni2137' : 8503,
'uni2005' : 8197,
'uni2135' : 8501,
'uni2134' : 8500,
'uni02BA' : 698,
'uni2033' : 8243,
'uni0151' : 337,
'uni0150' : 336,
'uni0157' : 343,
'equal' : 61,
'uni0155' : 341,
'uni0154' : 340,
's' : 115,
'uni233F' : 9023,
'eth' : 240,
'uni24BE' : 9406,
'uni21E9' : 8681,
'uni2060' : 8288,
'Egrave' : 200,
'uni255D' : 9565,
'uni24CD' : 9421,
'uni21E1' : 8673,
'uni21B9' : 8633,
'hyphen' : 45,
'uni01BE' : 446,
'uni01BB' : 443,
'period' : 46,
'igrave' : 236,
'uni01BA' : 442,
'uni2296' : 8854,
'uni2297' : 8855,
'uni2294' : 8852,
'uni2295' : 8853,
'colon' : 58,
'uni2293' : 8851,
'uni2290' : 8848,
'uni2291' : 8849,
'uni032D' : 813,
'uni032E' : 814,
'uni032F' : 815,
'uni032A' : 810,
'uni032B' : 811,
'uni032C' : 812,
'uni231D' : 8989,
'Ecircumflex' : 202,
'uni24D7' : 9431,
'uni25DD' : 9693,
'trademark' : 8482,
'Aacute' : 193,
'cent' : 162,
'uni0445' : 1093,
'uni266E' : 9838,
'uni266D' : 9837,
'uni266B' : 9835,
'uni03C9' : 969,
'uni2003' : 8195,
'uni2047' : 8263,
'lslash' : 322,
'uni03A6' : 934,
'uni2043' : 8259,
'uni250C' : 9484,
'uni2040' : 8256,
'uni255F' : 9567,
'uni24CB' : 9419,
'uni0472' : 1138,
'uni0446' : 1094,
'uni0474' : 1140,
'uni0475' : 1141,
'uni2508' : 9480,
'uni2660' : 9824,
'uni2506' : 9478,
'uni2502' : 9474,
'c' : 99,
'uni2500' : 9472,
'N' : 78,
'uni22A6' : 8870,
'uni21E7' : 8679,
'uni2130' : 8496,
'uni2002' : 8194,
'breve' : 728,
'uni0442' : 1090,
'Oacute' : 211,
'uni229F' : 8863,
'uni25C7' : 9671,
'uni229D' : 8861,
'uni229E' : 8862,
'guillemotleft' : 171,
'uni0329' : 809,
'uni24E5' : 9445,
'uni011F' : 287,
'uni0324' : 804,
'uni0325' : 805,
'uni0326' : 806,
'uni0327' : 807,
'uni0321' : 801,
'uni0322' : 802,
'n' : 110,
'uni2032' : 8242,
'uni2269' : 8809,
'uni2268' : 8808,
'uni0306' : 774,
'uni226B' : 8811,
'uni21EA' : 8682,
'uni0166' : 358,
'uni203B' : 8251,
'uni01B5' : 437,
'idieresis' : 239,
'uni02BC' : 700,
'uni01B0' : 432,
'braceright' : 125,
'seven' : 55,
'uni02BB' : 699,
'uni011A' : 282,
'uni29FB' : 10747,
'brokenbar' : 166,
'uni2036' : 8246,
'uni25C0' : 9664,
'uni0156' : 342,
'uni22D5' : 8917,
'uni0258' : 600,
'ugrave' : 249,
'uni22D6' : 8918,
'uni22D1' : 8913,
'uni2034' : 8244,
'uni22D3' : 8915,
'uni22D2' : 8914,
'uni203C' : 8252,
'uni223E' : 8766,
'uni02BF' : 703,
'uni22D9' : 8921,
'uni22D8' : 8920,
'uni25BD' : 9661,
'uni25BE' : 9662,
'uni25BF' : 9663,
'uni041B' : 1051,
'periodcentered' : 183,
'uni25BC' : 9660,
'uni019E' : 414,
'uni019B' : 411,
'uni019A' : 410,
'uni2007' : 8199,
'uni0391' : 913,
'uni0390' : 912,
'uni0393' : 915,
'uni0392' : 914,
'uni0395' : 917,
'uni0394' : 916,
'uni0397' : 919,
'uni0396' : 918,
'uni0399' : 921,
'uni0398' : 920,
'uni25C8' : 9672,
'uni2468' : 9320,
'sterling' : 163,
'uni22EB' : 8939,
'uni039C' : 924,
'uni039B' : 923,
'uni039E' : 926,
'uni039D' : 925,
'uni039F' : 927,
'I' : 73,
'uni03E1' : 993,
'uni03E0' : 992,
'uni2319' : 8985,
'uni228B' : 8843,
'uni25B5' : 9653,
'uni25B6' : 9654,
'uni22EA' : 8938,
'uni24B9' : 9401,
'uni044E' : 1102,
'uni0199' : 409,
'uni2266' : 8806,
'Y' : 89,
'uni22A2' : 8866,
'Eth' : 208,
'uni266F' : 9839,
'emdash' : 8212,
'uni263B' : 9787,
'uni24BD' : 9405,
'uni22DE' : 8926,
'uni0360' : 864,
'uni2557' : 9559,
'uni22DF' : 8927,
'uni22DA' : 8922,
'uni22DC' : 8924,
'uni0361' : 865,
'i' : 105,
'uni24BF' : 9407,
'uni0362' : 866,
'uni263E' : 9790,
'uni028D' : 653,
'uni2259' : 8793,
'uni0323' : 803,
'uni2265' : 8805,
'daggerdbl' : 8225,
'y' : 121,
'uni010A' : 266,
'plusminus' : 177,
'less' : 60,
'uni21AE' : 8622,
'uni0315' : 789,
'uni230B' : 8971,
'uni21AF' : 8623,
'uni21AA' : 8618,
'uni21AC' : 8620,
'uni21AB' : 8619,
'uni01FB' : 507,
'uni01FC' : 508,
'uni223A' : 8762,
'uni01FA' : 506,
'uni01FF' : 511,
'uni01FD' : 509,
'uni01FE' : 510,
'uni2567' : 9575,
'uni25E0' : 9696,
'uni0104' : 260,
'uni0105' : 261,
'uni0106' : 262,
'uni0107' : 263,
'uni0100' : 256,
'uni0101' : 257,
'uni0102' : 258,
'uni0103' : 259,
'uni2038' : 8248,
'uni2009' : 8201,
'uni2008' : 8200,
'uni0108' : 264,
'uni0109' : 265,
'uni02A1' : 673,
'uni223B' : 8763,
'uni226C' : 8812,
'uni25AC' : 9644,
'uni24D3' : 9427,
'uni21E0' : 8672,
'uni21E3' : 8675,
'Udieresis' : 220,
'uni21E2' : 8674,
'D' : 68,
'uni21E5' : 8677,
'uni2621' : 9761,
'uni21D1' : 8657,
'uni203E' : 8254,
'uni22C6' : 8902,
'uni21E4' : 8676,
'uni010D' : 269,
'uni010E' : 270,
'uni010F' : 271,
'five' : 53,
'T' : 84,
'uni010B' : 267,
'uni010C' : 268,
'uni2605' : 9733,
'uni2663' : 9827,
'uni21E6' : 8678,
'uni24B6' : 9398,
'uni22C1' : 8897,
'oslash' : 248,
'acute' : 180,
'uni01F0' : 496,
'd' : 100,
'OE' : 338,
'uni22E3' : 8931,
'Igrave' : 204,
'uni2308' : 8968,
'uni2309' : 8969,
'uni21A9' : 8617,
't' : 116,
'uni2313' : 8979,
'uni03A3' : 931,
'uni21A4' : 8612,
'uni21A7' : 8615,
'uni21A6' : 8614,
'uni21A1' : 8609,
'uni21A0' : 8608,
'uni21A3' : 8611,
'uni21A2' : 8610,
'parenright' : 41,
'uni256A' : 9578,
'uni25DC' : 9692,
'uni24CE' : 9422,
'uni042C' : 1068,
'uni24E0' : 9440,
'uni042B' : 1067,
'uni0409' : 1033,
'uni0408' : 1032,
'uni24E7' : 9447,
'uni25B4' : 9652,
'uni042A' : 1066,
'uni228E' : 8846,
'uni0401' : 1025,
'adieresis' : 228,
'uni0403' : 1027,
'quotesingle' : 39,
'uni0405' : 1029,
'uni0404' : 1028,
'uni0407' : 1031,
'uni0406' : 1030,
'uni229C' : 8860,
'uni2306' : 8966,
'uni2253' : 8787,
'twodotenleader' : 8229,
'uni2131' : 8497,
'uni21DA' : 8666,
'uni2234' : 8756,
'uni2235' : 8757,
'uni01A5' : 421,
'uni2237' : 8759,
'uni2230' : 8752,
'uni02CC' : 716,
'slash' : 47,
'uni01A0' : 416,
'ellipsis' : 8230,
'uni2299' : 8857,
'uni2238' : 8760,
'numbersign' : 35,
'uni21A8' : 8616,
'uni223D' : 8765,
'uni01AF' : 431,
'uni223F' : 8767,
'uni01AD' : 429,
'uni01AB' : 427,
'odieresis' : 246,
'uni223C' : 8764,
'uni227D' : 8829,
'uni0280' : 640,
'O' : 79,
'uni227E' : 8830,
'uni21A5' : 8613,
'uni22D4' : 8916,
'uni25D4' : 9684,
'uni227F' : 8831,
'uni0435' : 1077,
'uni2302' : 8962,
'uni2669' : 9833,
'uni24E3' : 9443,
'uni2720' : 10016,
'uni22A8' : 8872,
'uni22A9' : 8873,
'uni040A' : 1034,
'uni22A7' : 8871,
'oe' : 339,
'uni040B' : 1035,
'uni040E' : 1038,
'uni22A3' : 8867,
'o' : 111,
'uni040F' : 1039,
'Edieresis' : 203,
'uni25D5' : 9685,
'plus' : 43,
'uni044D' : 1101,
'uni263C' : 9788,
'uni22E6' : 8934,
'uni2283' : 8835,
'uni258C' : 9612,
'uni219E' : 8606,
'uni24E4' : 9444,
'uni2136' : 8502,
'dagger' : 8224,
'uni24B7' : 9399,
'uni219B' : 8603,
'uni22E5' : 8933,
'three' : 51,
'uni210B' : 8459,
'uni2534' : 9524,
'uni24B8' : 9400,
'uni230A' : 8970,
'hungarumlaut' : 733,
'parenleft' : 40,
'uni0148' : 328,
'uni0149' : 329,
'uni2124' : 8484,
'uni2125' : 8485,
'uni2126' : 8486,
'uni2127' : 8487,
'uni0140' : 320,
'uni2129' : 8489,
'uni25C5' : 9669,
'uni0143' : 323,
'uni0144' : 324,
'uni0145' : 325,
'uni0146' : 326,
'uni0147' : 327,
'uni210D' : 8461,
'fraction' : 8260,
'uni2031' : 8241,
'uni2196' : 8598,
'uni2035' : 8245,
'uni24E6' : 9446,
'uni016B' : 363,
'uni24BA' : 9402,
'uni266A' : 9834,
'uni0116' : 278,
'uni2115' : 8469,
'registered' : 174,
'J' : 74,
'uni25DF' : 9695,
'uni25CE' : 9678,
'uni273D' : 10045,
'dieresis' : 168,
'uni212B' : 8491,
'uni0114' : 276,
'uni212D' : 8493,
'uni212E' : 8494,
'uni212F' : 8495,
'uni014A' : 330,
'uni014B' : 331,
'uni014C' : 332,
'uni014D' : 333,
'uni014E' : 334,
'uni014F' : 335,
'uni025E' : 606,
'uni24E8' : 9448,
'uni0111' : 273,
'uni24E9' : 9449,
'Ograve' : 210,
'j' : 106,
'uni2195' : 8597,
'uni2194' : 8596,
'uni2197' : 8599,
'uni2037' : 8247,
'uni2191' : 8593,
'uni2190' : 8592,
'uni2193' : 8595,
'uni2192' : 8594,
'uni29FA' : 10746,
'uni2713' : 10003,
'z' : 122,
'uni2199' : 8601,
'uni2198' : 8600,
'uni2667' : 9831,
'ae' : 230,
'uni0448' : 1096,
'semicolon' : 59,
'uni2666' : 9830,
'uni038F' : 911,
'uni0444' : 1092,
'uni0447' : 1095,
'uni038E' : 910,
'uni0441' : 1089,
'uni038C' : 908,
'uni0443' : 1091,
'uni038A' : 906,
'uni0250' : 592,
'uni0251' : 593,
'uni0252' : 594,
'uni0253' : 595,
'uni0254' : 596,
'at' : 64,
'uni0256' : 598,
'uni0257' : 599,
'uni0167' : 359,
'uni0259' : 601,
'uni228C' : 8844,
'uni2662' : 9826,
'uni0319' : 793,
'uni0318' : 792,
'uni24BC' : 9404,
'uni0402' : 1026,
'uni22EF' : 8943,
'Iacute' : 205,
'uni22ED' : 8941,
'uni22EE' : 8942,
'uni0311' : 785,
'uni0310' : 784,
'uni21E8' : 8680,
'uni0312' : 786,
'percent' : 37,
'uni0317' : 791,
'uni0316' : 790,
'uni21D6' : 8662,
'uni21D7' : 8663,
'uni21D4' : 8660,
'uni21D5' : 8661,
'uni21D2' : 8658,
'uni21D3' : 8659,
'uni21D0' : 8656,
'uni2138' : 8504,
'uni2270' : 8816,
'uni2271' : 8817,
'uni2272' : 8818,
'uni2273' : 8819,
'uni2274' : 8820,
'uni2275' : 8821,
'bracketright' : 93,
'uni21D9' : 8665,
'uni21DF' : 8671,
'uni21DD' : 8669,
'uni21DE' : 8670,
'AE' : 198,
'uni03AE' : 942,
'uni227A' : 8826,
'uni227B' : 8827,
'uni227C' : 8828,
'asterisk' : 42,
'aacute' : 225,
'uni226F' : 8815,
'uni22E2' : 8930,
'uni0386' : 902,
'uni22E0' : 8928,
'uni22E1' : 8929,
'U' : 85,
'uni22E7' : 8935,
'uni22E4' : 8932,
'uni0387' : 903,
'uni031A' : 794,
'eacute' : 233,
'uni22E8' : 8936,
'uni22E9' : 8937,
'uni24D8' : 9432,
'uni025A' : 602,
'uni025B' : 603,
'uni025C' : 604,
'e' : 101,
'uni0128' : 296,
'uni025F' : 607,
'uni2665' : 9829,
'thorn' : 254,
'uni0129' : 297,
'uni253C' : 9532,
'uni25D7' : 9687,
'u' : 117,
'uni0388' : 904,
'uni0389' : 905,
'uni0255' : 597,
'uni0171' : 369,
'uni0384' : 900,
'uni0385' : 901,
'uni044A' : 1098,
'uni252C' : 9516,
'uni044C' : 1100,
'uni044B' : 1099
}
uni2type1 = dict(((v,k) for k,v in six.iteritems(type12uni)))
tex2uni = {
'widehat' : 0x0302,
'widetilde' : 0x0303,
'widebar' : 0x0305,
'langle' : 0x27e8,
'rangle' : 0x27e9,
'perp' : 0x27c2,
'neq' : 0x2260,
'Join' : 0x2a1d,
'leqslant' : 0x2a7d,
'geqslant' : 0x2a7e,
'lessapprox' : 0x2a85,
'gtrapprox' : 0x2a86,
'lesseqqgtr' : 0x2a8b,
'gtreqqless' : 0x2a8c,
'triangleeq' : 0x225c,
'eqslantless' : 0x2a95,
'eqslantgtr' : 0x2a96,
'backepsilon' : 0x03f6,
'precapprox' : 0x2ab7,
'succapprox' : 0x2ab8,
'fallingdotseq' : 0x2252,
'subseteqq' : 0x2ac5,
'supseteqq' : 0x2ac6,
'varpropto' : 0x221d,
'precnapprox' : 0x2ab9,
'succnapprox' : 0x2aba,
'subsetneqq' : 0x2acb,
'supsetneqq' : 0x2acc,
'lnapprox' : 0x2ab9,
'gnapprox' : 0x2aba,
'longleftarrow' : 0x27f5,
'longrightarrow' : 0x27f6,
'longleftrightarrow' : 0x27f7,
'Longleftarrow' : 0x27f8,
'Longrightarrow' : 0x27f9,
'Longleftrightarrow' : 0x27fa,
'longmapsto' : 0x27fc,
'leadsto' : 0x21dd,
'dashleftarrow' : 0x290e,
'dashrightarrow' : 0x290f,
'circlearrowleft' : 0x21ba,
'circlearrowright' : 0x21bb,
'leftrightsquigarrow' : 0x21ad,
'leftsquigarrow' : 0x219c,
'rightsquigarrow' : 0x219d,
'Game' : 0x2141,
'hbar' : 0x0127,
'hslash' : 0x210f,
'ldots' : 0x2026,
'vdots' : 0x22ee,
'doteqdot' : 0x2251,
'doteq' : 8784,
'partial' : 8706,
'gg' : 8811,
'asymp' : 8781,
'blacktriangledown' : 9662,
'otimes' : 8855,
'nearrow' : 8599,
'varpi' : 982,
'vee' : 8744,
'vec' : 8407,
'smile' : 8995,
'succnsim' : 8937,
'gimel' : 8503,
'vert' : 124,
'|' : 124,
'varrho' : 1009,
'P' : 182,
'approxident' : 8779,
'Swarrow' : 8665,
'textasciicircum' : 94,
'imageof' : 8887,
'ntriangleleft' : 8938,
'nleq' : 8816,
'div' : 247,
'nparallel' : 8742,
'Leftarrow' : 8656,
'lll' : 8920,
'oiint' : 8751,
'ngeq' : 8817,
'Theta' : 920,
'origof' : 8886,
'blacksquare' : 9632,
'solbar' : 9023,
'neg' : 172,
'sum' : 8721,
'Vdash' : 8873,
'coloneq' : 8788,
'degree' : 176,
'bowtie' : 8904,
'blacktriangleright' : 9654,
'varsigma' : 962,
'leq' : 8804,
'ggg' : 8921,
'lneqq' : 8808,
'scurel' : 8881,
'stareq' : 8795,
'BbbN' : 8469,
'nLeftarrow' : 8653,
'nLeftrightarrow' : 8654,
'k' : 808,
'bot' : 8869,
'BbbC' : 8450,
'Lsh' : 8624,
'leftleftarrows' : 8647,
'BbbZ' : 8484,
'digamma' : 989,
'BbbR' : 8477,
'BbbP' : 8473,
'BbbQ' : 8474,
'vartriangleright' : 8883,
'succsim' : 8831,
'wedge' : 8743,
'lessgtr' : 8822,
'veebar' : 8891,
'mapsdown' : 8615,
'Rsh' : 8625,
'chi' : 967,
'prec' : 8826,
'nsubseteq' : 8840,
'therefore' : 8756,
'eqcirc' : 8790,
'textexclamdown' : 161,
'nRightarrow' : 8655,
'flat' : 9837,
'notin' : 8713,
'llcorner' : 8990,
'varepsilon' : 949,
'bigtriangleup' : 9651,
'aleph' : 8501,
'dotminus' : 8760,
'upsilon' : 965,
'Lambda' : 923,
'cap' : 8745,
'barleftarrow' : 8676,
'mu' : 956,
'boxplus' : 8862,
'mp' : 8723,
'circledast' : 8859,
'tau' : 964,
'in' : 8712,
'backslash' : 92,
'varnothing' : 8709,
'sharp' : 9839,
'eqsim' : 8770,
'gnsim' : 8935,
'Searrow' : 8664,
'updownarrows' : 8645,
'heartsuit' : 9825,
'trianglelefteq' : 8884,
'ddag' : 8225,
'sqsubseteq' : 8849,
'mapsfrom' : 8612,
'boxbar' : 9707,
'sim' : 8764,
'Nwarrow' : 8662,
'nequiv' : 8802,
'succ' : 8827,
'vdash' : 8866,
'Leftrightarrow' : 8660,
'parallel' : 8741,
'invnot' : 8976,
'natural' : 9838,
'ss' : 223,
'uparrow' : 8593,
'nsim' : 8769,
'hookrightarrow' : 8618,
'Equiv' : 8803,
'approx' : 8776,
'Vvdash' : 8874,
'nsucc' : 8833,
'leftrightharpoons' : 8651,
'Re' : 8476,
'boxminus' : 8863,
'equiv' : 8801,
'Lleftarrow' : 8666,
'll' : 8810,
'Cup' : 8915,
'measeq' : 8798,
'upharpoonleft' : 8639,
'lq' : 8216,
'Upsilon' : 933,
'subsetneq' : 8842,
'greater' : 62,
'supsetneq' : 8843,
'Cap' : 8914,
'L' : 321,
'spadesuit' : 9824,
'lrcorner' : 8991,
'not' : 824,
'bar' : 772,
'rightharpoonaccent' : 8401,
'boxdot' : 8865,
'l' : 322,
'leftharpoondown' : 8637,
'bigcup' : 8899,
'iint' : 8748,
'bigwedge' : 8896,
'downharpoonleft' : 8643,
'textasciitilde' : 126,
'subset' : 8834,
'leqq' : 8806,
'mapsup' : 8613,
'nvDash' : 8877,
'looparrowleft' : 8619,
'nless' : 8814,
'rightarrowbar' : 8677,
'Vert' : 8214,
'downdownarrows' : 8650,
'uplus' : 8846,
'simeq' : 8771,
'napprox' : 8777,
'ast' : 8727,
'twoheaduparrow' : 8607,
'doublebarwedge' : 8966,
'Sigma' : 931,
'leftharpoonaccent' : 8400,
'ntrianglelefteq' : 8940,
'nexists' : 8708,
'times' : 215,
'measuredangle' : 8737,
'bumpeq' : 8783,
'carriagereturn' : 8629,
'adots' : 8944,
'checkmark' : 10003,
'lambda' : 955,
'xi' : 958,
'rbrace' : 125,
'rbrack' : 93,
'Nearrow' : 8663,
'maltese' : 10016,
'clubsuit' : 9827,
'top' : 8868,
'overarc' : 785,
'varphi' : 966,
'Delta' : 916,
'iota' : 953,
'nleftarrow' : 8602,
'candra' : 784,
'supset' : 8835,
'triangleleft' : 9665,
'gtreqless' : 8923,
'ntrianglerighteq' : 8941,
'quad' : 8195,
'Xi' : 926,
'gtrdot' : 8919,
'leftthreetimes' : 8907,
'minus' : 8722,
'preccurlyeq' : 8828,
'nleftrightarrow' : 8622,
'lambdabar' : 411,
'blacktriangle' : 9652,
'kernelcontraction' : 8763,
'Phi' : 934,
'angle' : 8736,
'spadesuitopen' : 9828,
'eqless' : 8924,
'mid' : 8739,
'varkappa' : 1008,
'Ldsh' : 8626,
'updownarrow' : 8597,
'beta' : 946,
'textquotedblleft' : 8220,
'rho' : 961,
'alpha' : 945,
'intercal' : 8890,
'beth' : 8502,
'grave' : 768,
'acwopencirclearrow' : 8634,
'nmid' : 8740,
'nsupset' : 8837,
'sigma' : 963,
'dot' : 775,
'Rightarrow' : 8658,
'turnednot' : 8985,
'backsimeq' : 8909,
'leftarrowtail' : 8610,
'approxeq' : 8778,
'curlyeqsucc' : 8927,
'rightarrowtail' : 8611,
'Psi' : 936,
'copyright' : 169,
'yen' : 165,
'vartriangleleft' : 8882,
'rasp' : 700,
'triangleright' : 9655,
'precsim' : 8830,
'infty' : 8734,
'geq' : 8805,
'updownarrowbar' : 8616,
'precnsim' : 8936,
'H' : 779,
'ulcorner' : 8988,
'looparrowright' : 8620,
'ncong' : 8775,
'downarrow' : 8595,
'circeq' : 8791,
'subseteq' : 8838,
'bigstar' : 9733,
'prime' : 8242,
'lceil' : 8968,
'Rrightarrow' : 8667,
'oiiint' : 8752,
'curlywedge' : 8911,
'vDash' : 8872,
'lfloor' : 8970,
'ddots' : 8945,
'exists' : 8707,
'underbar' : 817,
'Pi' : 928,
'leftrightarrows' : 8646,
'sphericalangle' : 8738,
'coprod' : 8720,
'circledcirc' : 8858,
'gtrsim' : 8819,
'gneqq' : 8809,
'between' : 8812,
'theta' : 952,
'complement' : 8705,
'arceq' : 8792,
'nVdash' : 8878,
'S' : 167,
'wr' : 8768,
'wp' : 8472,
'backcong' : 8780,
'lasp' : 701,
'c' : 807,
'nabla' : 8711,
'dotplus' : 8724,
'eta' : 951,
'forall' : 8704,
'eth' : 240,
'colon' : 58,
'sqcup' : 8852,
'rightrightarrows' : 8649,
'sqsupset' : 8848,
'mapsto' : 8614,
'bigtriangledown' : 9661,
'sqsupseteq' : 8850,
'propto' : 8733,
'pi' : 960,
'pm' : 177,
'dots' : 0x2026,
'nrightarrow' : 8603,
'textasciiacute' : 180,
'Doteq' : 8785,
'breve' : 774,
'sqcap' : 8851,
'twoheadrightarrow' : 8608,
'kappa' : 954,
'vartriangle' : 9653,
'diamondsuit' : 9826,
'pitchfork' : 8916,
'blacktriangleleft' : 9664,
'nprec' : 8832,
'vdots' : 8942,
'curvearrowright' : 8631,
'barwedge' : 8892,
'multimap' : 8888,
'textquestiondown' : 191,
'cong' : 8773,
'rtimes' : 8906,
'rightzigzagarrow' : 8669,
'rightarrow' : 8594,
'leftarrow' : 8592,
'__sqrt__' : 8730,
'twoheaddownarrow' : 8609,
'oint' : 8750,
'bigvee' : 8897,
'eqdef' : 8797,
'sterling' : 163,
'phi' : 981,
'Updownarrow' : 8661,
'backprime' : 8245,
'emdash' : 8212,
'Gamma' : 915,
'i' : 305,
'rceil' : 8969,
'leftharpoonup' : 8636,
'Im' : 8465,
'curvearrowleft' : 8630,
'wedgeq' : 8793,
'fallingdotseq' : 8786,
'curlyeqprec' : 8926,
'questeq' : 8799,
'less' : 60,
'upuparrows' : 8648,
'tilde' : 771,
'textasciigrave' : 96,
'smallsetminus' : 8726,
'ell' : 8467,
'cup' : 8746,
'danger' : 9761,
'nVDash' : 8879,
'cdotp' : 183,
'cdots' : 8943,
'hat' : 770,
'eqgtr' : 8925,
'psi' : 968,
'frown' : 8994,
'acute' : 769,
'downzigzagarrow' : 8623,
'ntriangleright' : 8939,
'cupdot' : 8845,
'circleddash' : 8861,
'oslash' : 8856,
'mho' : 8487,
'd' : 803,
'sqsubset' : 8847,
'cdot' : 8901,
'Omega' : 937,
'OE' : 338,
'veeeq' : 8794,
'Finv' : 8498,
't' : 865,
'leftrightarrow' : 8596,
'swarrow' : 8601,
'rightthreetimes' : 8908,
'rightleftharpoons' : 8652,
'lesssim' : 8818,
'searrow' : 8600,
'because' : 8757,
'gtrless' : 8823,
'star' : 8902,
'nsubset' : 8836,
'zeta' : 950,
'dddot' : 8411,
'bigcirc' : 9675,
'Supset' : 8913,
'circ' : 8728,
'slash' : 8725,
'ocirc' : 778,
'prod' : 8719,
'twoheadleftarrow' : 8606,
'daleth' : 8504,
'upharpoonright' : 8638,
'odot' : 8857,
'Uparrow' : 8657,
'O' : 216,
'hookleftarrow' : 8617,
'trianglerighteq' : 8885,
'nsime' : 8772,
'oe' : 339,
'nwarrow' : 8598,
'o' : 248,
'ddddot' : 8412,
'downharpoonright' : 8642,
'succcurlyeq' : 8829,
'gamma' : 947,
'scrR' : 8475,
'dag' : 8224,
'thickspace' : 8197,
'frakZ' : 8488,
'lessdot' : 8918,
'triangledown' : 9663,
'ltimes' : 8905,
'scrB' : 8492,
'endash' : 8211,
'scrE' : 8496,
'scrF' : 8497,
'scrH' : 8459,
'scrI' : 8464,
'rightharpoondown' : 8641,
'scrL' : 8466,
'scrM' : 8499,
'frakC' : 8493,
'nsupseteq' : 8841,
'circledR' : 174,
'circledS' : 9416,
'ngtr' : 8815,
'bigcap' : 8898,
'scre' : 8495,
'Downarrow' : 8659,
'scrg' : 8458,
'overleftrightarrow' : 8417,
'scro' : 8500,
'lnsim' : 8934,
'eqcolon' : 8789,
'curlyvee' : 8910,
'urcorner' : 8989,
'lbrace' : 123,
'Bumpeq' : 8782,
'delta' : 948,
'boxtimes' : 8864,
'overleftarrow' : 8406,
'prurel' : 8880,
'clubsuitopen' : 9831,
'cwopencirclearrow' : 8635,
'geqq' : 8807,
'rightleftarrows' : 8644,
'ac' : 8766,
'ae' : 230,
'int' : 8747,
'rfloor' : 8971,
'risingdotseq' : 8787,
'nvdash' : 8876,
'diamond' : 8900,
'ddot' : 776,
'backsim' : 8765,
'oplus' : 8853,
'triangleq' : 8796,
'check' : 780,
'ni' : 8715,
'iiint' : 8749,
'ne' : 8800,
'lesseqgtr' : 8922,
'obar' : 9021,
'supseteq' : 8839,
'nu' : 957,
'AA' : 197,
'AE' : 198,
'models' : 8871,
'ominus' : 8854,
'dashv' : 8867,
'omega' : 969,
'rq' : 8217,
'Subset' : 8912,
'rightharpoonup' : 8640,
'Rdsh' : 8627,
'bullet' : 8729,
'divideontimes' : 8903,
'lbrack' : 91,
'textquotedblright' : 8221,
'Colon' : 8759,
'%' : 37,
'$' : 36,
'{' : 123,
'}' : 125,
'_' : 95,
'#' : 35,
'imath' : 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to' : 8594,
'succeq' : 8829,
'emptyset' : 8709,
'leftparen' : 40,
'rightparen' : 41,
'bigoplus' : 10753,
'leftangle' : 10216,
'rightangle' : 10217,
'leftbrace' : 124,
'rightbrace' : 125,
'jmath' : 567,
'bigodot' : 10752,
'preceq' : 8828,
'biguplus' : 10756,
'epsilon' : 949,
'vartheta' : 977,
'bigotimes' : 10754,
'guillemotleft' : 171,
'ring' : 730,
'Thorn' : 222,
'guilsinglright' : 8250,
'perthousand' : 8240,
'macron' : 175,
'cent' : 162,
'guillemotright' : 187,
'equal' : 61,
'asterisk' : 42,
'guilsinglleft' : 8249,
'plus' : 43,
'thorn' : 254,
'dagger' : 8224
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
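# The ranges below can be resolved for a single character roughly as in this
# sketch (illustrative only; the helper is hypothetical and not used elsewhere
# in matplotlib):
def _map_virtual_char(ranges, codepoint):
    """Return (dst_font, dst_codepoint) for codepoint, or None if unmapped."""
    for src_start, src_end, dst_font, dst_start in ranges:
        if src_start <= codepoint <= src_end:
            # offsets within a source range carry over to the destination range
            return dst_font, dst_start + (codepoint - src_start)
    return None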
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q
(0x0052, 0x0052, 'it', 0x211d), # R
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'bf', 0xe38a), # A-B
(0x0043, 0x0043, 'bf', 0x2102), # C
(0x0044, 0x0044, 'bf', 0x2145), # D
(0x0045, 0x0047, 'bf', 0xe38d), # E-G
(0x0048, 0x0048, 'bf', 0x210d), # H
(0x0049, 0x004d, 'bf', 0xe390), # I-M
(0x004e, 0x004e, 'bf', 0x2115), # N
(0x004f, 0x004f, 'bf', 0xe395), # O
(0x0050, 0x0051, 'bf', 0x2119), # P-Q
(0x0052, 0x0052, 'bf', 0x211d), # R
(0x0053, 0x0059, 'bf', 0xe396), # S-Y
(0x005a, 0x005a, 'bf', 0x2124), # Z
(0x0061, 0x0063, 'bf', 0xe39d), # a-c
(0x0064, 0x0065, 'bf', 0x2146), # d-e
(0x0066, 0x0068, 'bf', 0xe3a2), # f-h
(0x0069, 0x006a, 'bf', 0x2148), # i-j
(0x006b, 0x007a, 'bf', 0xe3a7), # k-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
| mit |
barak/autograd | examples/bayesian_neural_net.py | 1 | 3940 | from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.norm as norm
from black_box_svi import black_box_variational_inference
from optimizers import adam
def make_nn_funs(layer_sizes, L2_reg, noise_variance, nonlinearity=np.tanh):
"""These functions implement a standard multi-layer perceptron,
vectorized over both training examples and weight samples."""
shapes = zip(layer_sizes[:-1], layer_sizes[1:])
num_weights = sum((m+1)*n for m, n in shapes)
def unpack_layers(weights):
num_weight_sets = len(weights)
for m, n in shapes:
yield weights[:, :m*n] .reshape((num_weight_sets, m, n)),\
weights[:, m*n:m*n+n].reshape((num_weight_sets, 1, n))
weights = weights[:, (m+1)*n:]
def predictions(weights, inputs):
"""weights is shape (num_weight_samples x num_weights)
inputs is shape (num_datapoints x D)"""
inputs = np.expand_dims(inputs, 0)
for W, b in unpack_layers(weights):
outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
inputs = nonlinearity(outputs)
return outputs
def logprob(weights, inputs, targets):
log_prior = -L2_reg * np.sum(weights**2, axis=1)
preds = predictions(weights, inputs)
log_lik = -np.sum((preds - targets)**2, axis=1)[:, 0] / noise_variance
return log_prior + log_lik
return num_weights, predictions, logprob
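# --- Editor's hedged sketch (not part of the original autograd example) ---
# Illustrates the shapes `make_nn_funs` works with: weights of shape
# (num_weight_samples, num_weights) and inputs of shape (num_datapoints, D)
# give predictions of shape (num_weight_samples, num_datapoints, 1). The
# helper name `_demo_shapes` is hypothetical and is never called by the
# original script; it only serves as a usage sketch.
def _demo_shapes():
    num_weights, predictions, _ = \
        make_nn_funs(layer_sizes=[1, 10, 10, 1], L2_reg=0.01,
                     noise_variance=0.01, nonlinearity=np.tanh)
    weights = npr.RandomState(0).randn(5, num_weights)  # 5 weight samples
    inputs = np.linspace(-1, 1, 7).reshape((7, 1))      # 7 datapoints, D=1
    preds = predictions(weights, inputs)
    return preds.shape                                  # expected: (5, 7, 1)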
def build_toy_dataset(n_data=40, noise_std=0.1):
D = 1
rs = npr.RandomState(0)
    inputs = np.concatenate([np.linspace(0, 2, num=n_data//2),
                             np.linspace(6, 8, num=n_data//2)])
targets = np.cos(inputs) + rs.randn(n_data) * noise_std
inputs = (inputs - 4.0) / 4.0
inputs = inputs.reshape((len(inputs), D))
targets = targets.reshape((len(targets), D))
return inputs, targets
if __name__ == '__main__':
# Specify inference problem by its unnormalized log-posterior.
rbf = lambda x: norm.pdf(x, 0, 1)
sq = lambda x: np.sin(x)
num_weights, predictions, logprob = \
make_nn_funs(layer_sizes=[1, 10, 10, 1], L2_reg=0.01,
noise_variance = 0.01, nonlinearity=rbf)
inputs, targets = build_toy_dataset()
log_posterior = lambda weights, t: logprob(weights, inputs, targets)
# Build variational objective.
objective, gradient, unpack_params = \
black_box_variational_inference(log_posterior, num_weights,
num_samples=20)
# Set up figure.
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
def callback(params, t, g):
print("Iteration {} lower bound {}".format(t, -objective(params, t)))
# Sample functions from posterior.
rs = npr.RandomState(0)
mean, log_std = unpack_params(params)
#rs = npr.RandomState(0)
sample_weights = rs.randn(10, num_weights) * np.exp(log_std) + mean
plot_inputs = np.linspace(-8, 8, num=400)
outputs = predictions(sample_weights, np.expand_dims(plot_inputs, 1))
# Plot data and functions.
plt.cla()
ax.plot(inputs.ravel(), targets.ravel(), 'bx')
ax.plot(plot_inputs, outputs[:, :, 0].T)
ax.set_ylim([-2, 3])
plt.draw()
plt.pause(1.0/60.0)
# Initialize variational parameters
rs = npr.RandomState(0)
init_mean = rs.randn(num_weights)
init_log_std = -5 * np.ones(num_weights)
init_var_params = np.concatenate([init_mean, init_log_std])
print("Optimizing variational parameters...")
variational_params = adam(gradient, init_var_params,
step_size=0.1, num_iters=1000, callback=callback)
| mit |
gclenaghan/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
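# --- Editor's hedged sketch (not part of the original scikit-learn source) ---
# A minimal illustration of fitting one of the estimators exported above; the
# helper name `_example_ridge_fit` is hypothetical and is never called anywhere.
def _example_ridge_fit():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0.0, 1.0, 2.0, 3.0])
    model = Ridge(alpha=1.0).fit(X, y)  # Ridge is imported from .ridge above
    return model.predict(np.array([[4.0]]))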
| bsd-3-clause |
shaneknapp/spark | python/pyspark/pandas/typedef/typehints.py | 11 | 18793 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utilities to deal with types. This is mostly focused on python3.
"""
import datetime
import decimal
from inspect import getfullargspec, isclass
from typing import ( # noqa: F401
Any,
Callable,
Generic,
List,
Optional,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype, pandas_dtype
try:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
extension_dtypes_available = True
extension_dtypes = (Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype) # type: Tuple
try:
from pandas import BooleanDtype, StringDtype
extension_object_dtypes_available = True
extension_dtypes += (BooleanDtype, StringDtype)
except ImportError:
extension_object_dtypes_available = False
try:
from pandas import Float32Dtype, Float64Dtype
extension_float_dtypes_available = True
extension_dtypes += (Float32Dtype, Float64Dtype)
except ImportError:
extension_float_dtypes_available = False
except ImportError:
extension_dtypes_available = False
extension_object_dtypes_available = False
extension_float_dtypes_available = False
extension_dtypes = ()
import pyarrow as pa
import pyspark.sql.types as types
from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Dtype, T
from pyspark.pandas.typedef.string_typehints import resolve_string_type_hint
# A column of data, with the data type.
class SeriesType(Generic[T]):
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "SeriesType[{}]".format(self.spark_type)
class DataFrameType(object):
def __init__(
self, dtypes: List[Dtype], spark_types: List[types.DataType], names: List[Optional[str]]
):
from pyspark.pandas.internal import InternalField
from pyspark.pandas.utils import name_like_string
self.fields = [
InternalField(
dtype=dtype,
struct_field=types.StructField(
name=(name_like_string(name) if name is not None else ("c%s" % i)),
dataType=spark_type,
),
)
for i, (name, dtype, spark_type) in enumerate(zip(names, dtypes, spark_types))
]
@property
def dtypes(self) -> List[Dtype]:
return [field.dtype for field in self.fields]
@property
def spark_type(self) -> types.StructType:
return types.StructType([field.struct_field for field in self.fields])
def __repr__(self) -> str:
return "DataFrameType[{}]".format(self.spark_type)
# The type is a scalar type that is furthermore understood by Spark.
class ScalarType(object):
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "ScalarType[{}]".format(self.spark_type)
# The type is left unspecified or we do not know about this type.
class UnknownType(object):
def __init__(self, tpe: Any):
self.tpe = tpe
def __repr__(self) -> str:
return "UnknownType[{}]".format(self.tpe)
class NameTypeHolder(object):
name = None
tpe = None
def as_spark_type(tpe: Union[str, type, Dtype], *, raise_error: bool = True) -> types.DataType:
"""
Given a Python type, returns the equivalent spark type.
Accepts:
- the built-in types in Python
- the built-in types in numpy
- list of pairs of (field_name, type)
- dictionaries of field_name -> type
- Python3's typing system
"""
if isinstance(tpe, np.dtype) and tpe == np.dtype("object"):
pass
# ArrayType
elif tpe in (np.ndarray,):
return types.ArrayType(types.StringType())
elif hasattr(tpe, "__origin__") and issubclass(tpe.__origin__, list): # type: ignore
element_type = as_spark_type(tpe.__args__[0], raise_error=raise_error) # type: ignore
if element_type is None:
return None
return types.ArrayType(element_type)
# BinaryType
elif tpe in (bytes, np.character, np.bytes_, np.string_):
return types.BinaryType()
# BooleanType
elif tpe in (bool, np.bool, "bool", "?"):
return types.BooleanType()
# DateType
elif tpe in (datetime.date,):
return types.DateType()
# NumericType
elif tpe in (np.int8, np.byte, "int8", "byte", "b"):
return types.ByteType()
elif tpe in (decimal.Decimal,):
        # TODO: consider the precision & scale for the decimal type.
return types.DecimalType(38, 18)
elif tpe in (float, np.float, np.float64, "float", "float64", "double"):
return types.DoubleType()
elif tpe in (np.float32, "float32", "f"):
return types.FloatType()
elif tpe in (np.int32, "int32", "i"):
return types.IntegerType()
elif tpe in (int, np.int, np.int64, "int", "int64", "long"):
return types.LongType()
elif tpe in (np.int16, "int16", "short"):
return types.ShortType()
# StringType
elif tpe in (str, np.unicode_, "str", "U"):
return types.StringType()
# TimestampType
elif tpe in (datetime.datetime, np.datetime64, "datetime64[ns]", "M"):
return types.TimestampType()
# categorical types
    elif isinstance(tpe, CategoricalDtype) or (isinstance(tpe, str) and tpe == "category"):
return types.LongType()
# extension types
elif extension_dtypes_available:
# IntegralType
if isinstance(tpe, Int8Dtype) or (isinstance(tpe, str) and tpe == "Int8"):
return types.ByteType()
elif isinstance(tpe, Int16Dtype) or (isinstance(tpe, str) and tpe == "Int16"):
return types.ShortType()
elif isinstance(tpe, Int32Dtype) or (isinstance(tpe, str) and tpe == "Int32"):
return types.IntegerType()
elif isinstance(tpe, Int64Dtype) or (isinstance(tpe, str) and tpe == "Int64"):
return types.LongType()
if extension_object_dtypes_available:
# BooleanType
if isinstance(tpe, BooleanDtype) or (isinstance(tpe, str) and tpe == "boolean"):
return types.BooleanType()
# StringType
elif isinstance(tpe, StringDtype) or (isinstance(tpe, str) and tpe == "string"):
return types.StringType()
if extension_float_dtypes_available:
# FractionalType
if isinstance(tpe, Float32Dtype) or (isinstance(tpe, str) and tpe == "Float32"):
return types.FloatType()
elif isinstance(tpe, Float64Dtype) or (isinstance(tpe, str) and tpe == "Float64"):
return types.DoubleType()
if raise_error:
raise TypeError("Type %s was not understood." % tpe)
else:
return None
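# --- Editor's hedged sketch (not part of the original pyspark source) ---
# A few illustrative mappings performed by `as_spark_type`; the helper name
# `_as_spark_type_examples` is hypothetical and is never called by this module.
def _as_spark_type_examples() -> List[types.DataType]:
    return [
        as_spark_type(int),            # types.LongType()
        as_spark_type(np.float32),     # types.FloatType()
        as_spark_type(datetime.date),  # types.DateType()
        as_spark_type(List[bool]),     # types.ArrayType(types.BooleanType())
    ]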
def spark_type_to_pandas_dtype(
spark_type: types.DataType, *, use_extension_dtypes: bool = False
) -> Dtype:
"""Return the given Spark DataType to pandas dtype."""
if use_extension_dtypes and extension_dtypes_available:
# IntegralType
if isinstance(spark_type, types.ByteType):
return Int8Dtype()
elif isinstance(spark_type, types.ShortType):
return Int16Dtype()
elif isinstance(spark_type, types.IntegerType):
return Int32Dtype()
elif isinstance(spark_type, types.LongType):
return Int64Dtype()
if extension_object_dtypes_available:
# BooleanType
if isinstance(spark_type, types.BooleanType):
return BooleanDtype()
# StringType
elif isinstance(spark_type, types.StringType):
return StringDtype()
# FractionalType
if extension_float_dtypes_available:
if isinstance(spark_type, types.FloatType):
return Float32Dtype()
elif isinstance(spark_type, types.DoubleType):
return Float64Dtype()
if isinstance(
spark_type,
(
types.DateType,
types.NullType,
types.ArrayType,
types.MapType,
types.StructType,
types.UserDefinedType,
),
):
return np.dtype("object")
elif isinstance(spark_type, types.TimestampType):
return np.dtype("datetime64[ns]")
else:
return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())
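# --- Editor's hedged sketch (not part of the original pyspark source) ---
# Illustrative conversions through `spark_type_to_pandas_dtype`; the helper
# name `_spark_to_pandas_examples` is hypothetical and is never called here.
def _spark_to_pandas_examples() -> List[Dtype]:
    return [
        spark_type_to_pandas_dtype(types.TimestampType()),  # dtype('datetime64[ns]')
        spark_type_to_pandas_dtype(types.StructType([])),   # dtype('object')
        spark_type_to_pandas_dtype(types.LongType()),       # dtype('int64') via pyarrow
        spark_type_to_pandas_dtype(types.LongType(),
                                   use_extension_dtypes=True),  # Int64Dtype() if available
    ]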
def pandas_on_spark_type(tpe: Union[str, type, Dtype]) -> Tuple[Dtype, types.DataType]:
"""
Convert input into a pandas only dtype object or a numpy dtype object,
and its corresponding Spark DataType.
Parameters
----------
tpe : object to be converted
Returns
-------
tuple of np.dtype or a pandas dtype, and Spark DataType
Raises
------
TypeError if not a dtype
Examples
--------
>>> pandas_on_spark_type(int)
(dtype('int64'), LongType)
>>> pandas_on_spark_type(str)
(dtype('<U'), StringType)
>>> pandas_on_spark_type(datetime.date)
(dtype('O'), DateType)
>>> pandas_on_spark_type(datetime.datetime)
(dtype('<M8[ns]'), TimestampType)
>>> pandas_on_spark_type(List[bool])
(dtype('O'), ArrayType(BooleanType,true))
"""
try:
dtype = pandas_dtype(tpe)
spark_type = as_spark_type(dtype)
except TypeError:
spark_type = as_spark_type(tpe)
dtype = spark_type_to_pandas_dtype(spark_type)
return dtype, spark_type
def infer_pd_series_spark_type(pser: pd.Series, dtype: Dtype) -> types.DataType:
"""Infer Spark DataType from pandas Series dtype.
:param pser: :class:`pandas.Series` to be inferred
:param dtype: the Series' dtype
:return: the inferred Spark data type
"""
if dtype == np.dtype("object"):
if len(pser) == 0 or pser.isnull().all():
return types.NullType()
elif hasattr(pser.iloc[0], "__UDT__"):
return pser.iloc[0].__UDT__
else:
return from_arrow_type(pa.Array.from_pandas(pser).type)
elif isinstance(dtype, CategoricalDtype):
if isinstance(pser.dtype, CategoricalDtype):
return as_spark_type(pser.cat.codes.dtype)
else:
# `pser` must already be converted to codes.
return as_spark_type(pser.dtype)
else:
return as_spark_type(dtype)
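# --- Editor's hedged sketch (not part of the original pyspark source) ---
# Shows `infer_pd_series_spark_type` on a plain object-dtype Series; the helper
# name `_infer_series_example` is hypothetical and is never called here.
def _infer_series_example() -> types.DataType:
    pser = pd.Series(["a", "b", "c"])                    # object dtype
    return infer_pd_series_spark_type(pser, pser.dtype)  # StringType via pyarrow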
def infer_return_type(f: Callable) -> Union[SeriesType, DataFrameType, ScalarType, UnknownType]:
"""
Infer the return type from the return type annotation of the given function.
The returned type class indicates both dtypes (a pandas only dtype object
or a numpy dtype object) and its corresponding Spark DataType.
>>> def func() -> int:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> ps.Series[int]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> ps.DataFrame[np.float, str]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('<U')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> ps.DataFrame[np.float]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> 'int':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> 'ps.Series[int]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> 'ps.DataFrame[np.float, str]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('<U')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> 'ps.DataFrame[np.float]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> ps.DataFrame['a': np.float, 'b': int]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> def func() -> "ps.DataFrame['a': np.float, 'b': int]":
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ps.DataFrame[pdf.dtypes]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
>>> pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField((x, a),LongType,true),StructField((y, b),LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical([3, 4, 5])})
>>> def func() -> ps.DataFrame[pdf.dtypes]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]
>>> inferred.spark_type
StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]
>>> inferred.spark_type
StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
>>> def func() -> ps.Series[pdf.b.dtype]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
CategoricalDtype(categories=[3, 4, 5], ordered=False)
>>> inferred.spark_type
LongType
"""
# We should re-import to make sure the class 'SeriesType' is not treated as a class
# within this module locally. See Series.__class_getitem__ which imports this class
# canonically.
from pyspark.pandas.typedef import SeriesType, NameTypeHolder
spec = getfullargspec(f)
tpe = spec.annotations.get("return", None)
if isinstance(tpe, str):
# This type hint can happen when given hints are string to avoid forward reference.
tpe = resolve_string_type_hint(tpe)
if hasattr(tpe, "__origin__") and (
tpe.__origin__ == ps.DataFrame or tpe.__origin__ == ps.Series
):
        # When the Python version is lower than 3.7, unwrap it to a Tuple/SeriesType type hint.
tpe = tpe.__args__[0]
if hasattr(tpe, "__origin__") and issubclass(tpe.__origin__, SeriesType):
tpe = tpe.__args__[0]
if issubclass(tpe, NameTypeHolder):
tpe = tpe.tpe
dtype, spark_type = pandas_on_spark_type(tpe)
return SeriesType(dtype, spark_type)
# Note that, DataFrame type hints will create a Tuple.
# Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
# Check if the name is Tuple.
name = getattr(tpe, "_name", getattr(tpe, "__name__", None))
if name == "Tuple":
tuple_type = tpe
if hasattr(tuple_type, "__tuple_params__"):
# Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.
# See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py
parameters = getattr(tuple_type, "__tuple_params__")
else:
parameters = getattr(tuple_type, "__args__")
dtypes, spark_types = zip(
*(
pandas_on_spark_type(p.tpe)
if isclass(p) and issubclass(p, NameTypeHolder)
else pandas_on_spark_type(p)
for p in parameters
)
)
names = [
p.name if isclass(p) and issubclass(p, NameTypeHolder) else None for p in parameters
]
return DataFrameType(list(dtypes), list(spark_types), names)
types = pandas_on_spark_type(tpe)
if types is None:
return UnknownType(tpe)
else:
return ScalarType(*types)
def _test() -> None:
import doctest
import sys
import pyspark.pandas.typedef.typehints
globs = pyspark.pandas.typedef.typehints.__dict__.copy()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.typedef.typehints,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
QuLogic/iris | lib/iris/tests/integration/plot/test_netcdftime.py | 3 | 2243 | # (C) British Crown Copyright 2016 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test plot of a time coord with a non-Gregorian calendar.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import netcdftime
import numpy as np
from iris.coords import AuxCoord
from cf_units import Unit
if tests.NC_TIME_AXIS_AVAILABLE:
from nc_time_axis import CalendarDateTime
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
@tests.skip_nc_time_axis
@tests.skip_plot
class Test(tests.GraphicsTest):
def test_360_day_calendar(self):
n = 360
calendar = '360_day'
time_unit = Unit('days since 1970-01-01 00:00', calendar=calendar)
time_coord = AuxCoord(np.arange(n), 'time', units=time_unit)
times = [time_unit.num2date(point) for point in time_coord.points]
times = [netcdftime.datetime(atime.year, atime.month, atime.day,
atime.hour, atime.minute, atime.second)
for atime in times]
expected_ydata = np.array([CalendarDateTime(time, calendar)
for time in times])
line1, = iplt.plot(time_coord)
result_ydata = line1.get_ydata()
self.assertArrayEqual(expected_ydata, result_ydata)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
josesho/bootstrap_contrast | bootstrap_contrast/old__/sandbox.py | 2 | 23194 | from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator, FixedLocator, AutoLocator, FormatStrFormatter
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# This imports the custom functions used.
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import ci, bootstrap, bootstrap_contrast, bootstrap_indexes, jackknife_indexes, getstatarray, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
def contrastplot_test(
data, x, y, idx=None,
alpha=0.75,
axis_title_size=None,
barWidth=5,
contrastShareY=True,
contrastEffectSizeLineStyle='solid',
contrastEffectSizeLineColor='black',
contrastYlim=None,
contrastZeroLineStyle='solid',
contrastZeroLineColor='black',
effectSizeYLabel="Effect Size",
figsize=None,
floatContrast=True,
floatSwarmSpacer=0.2,
heightRatio=(1, 1),
idcol=None,
lineWidth=2,
legend=True,
legendFontSize=14,
legendFontProps={},
paired=False,
pal=None,
rawMarkerSize=8,
rawMarkerType='o',
reps=3000,
showGroupCount=True,
show95CI=False,
showAllYAxes=False,
showRawData=True,
smoothboot=False,
statfunction=None,
summaryBar=False,
summaryBarColor='grey',
summaryBarAlpha=0.25,
summaryColour='black',
summaryLine=True,
summaryLineStyle='solid',
summaryLineWidth=0.25,
summaryMarkerSize=10,
summaryMarkerType='o',
swarmShareY=True,
swarmYlim=None,
tickAngle=45,
tickAlignment='right',
violinOffset=0.375,
violinWidth=0.2,
violinColor='k',
xticksize=None,
yticksize=None,
**kwargs):
'''Takes a pandas dataframe and produces a contrast plot:
either a Cummings hub-and-spoke plot or a Gardner-Altman contrast plot.
-----------------------------------------------------------------------
Description of flags upcoming.'''
# Check that `data` is a pandas dataframe
if 'DataFrame' not in str(type(data)):
        raise TypeError("The object passed to the command is not a pandas DataFrame.\
Please convert it to a pandas DataFrame.")
# Get and set levels of data[x]
if idx is None:
widthratio=[1]
allgrps=np.sort(data[x].unique())
if paired:
# If `idx` is not specified, just take the FIRST TWO levels alphabetically.
tuple_in=tuple(allgrps[0:2],)
else:
# No idx is given, so all groups are compared to the first one in the DataFrame column.
tuple_in=(tuple(allgrps), )
if len(allgrps)>2:
floatContrast=False
else:
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
tuple_in=(idx, )
widthratio=[1]
if len(idx)>2:
floatContrast=False
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
tuple_in=idx
if ( any(len(element)>2 for element in tuple_in) ):
            # if any of the tuples in idx has more than 2 groups, we set floatContrast to False.
floatContrast=False
            # Make sure the widthratio of the separate multiplot corresponds to how
# many groups there are in each one.
widthratio=[]
for i in tuple_in:
widthratio.append(len(i))
else:
raise TypeError("The object passed to `idx` consists of a mixture of single strings and tuples. \
Please make sure that `idx` is either a tuple of column names, or a tuple of tuples for plotting.")
# initialise statfunction
if statfunction == None:
statfunction=np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList=list()
contrastListNames=list()
# # Calculate the bootstraps according to idx.
# for ix, current_tuple in enumerate(tuple_in):
# bscontrast=list()
# for i in range (1, len(current_tuple)):
# # Note that you start from one. No need to do auto-contrast!
# tempbs=bootstrap_contrast(
# data=data,
# x=x,
# y=y,
# idx=[current_tuple[0], current_tuple[i]],
# statfunction=statfunction,
# smoothboot=smoothboot,
# reps=reps)
# bscontrast.append(tempbs)
# contrastList.append(tempbs)
# contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
# Setting color palette for plotting.
if pal is None:
if 'hue' in kwargs:
colorCol=kwargs['hue']
colGrps=data[colorCol].unique()
nColors=len(colGrps)
else:
colorCol=x
colGrps=data[x].unique()
nColors=len([element for tupl in tuple_in for element in tupl])
plotPal=dict( zip( colGrps, sns.color_palette(n_colors=nColors) ) )
else:
plotPal=pal
# Ensure summaryLine and summaryBar are not displayed together.
if summaryLine is True and summaryBar is True:
summaryBar=True
summaryLine=False
# Turn off summary line if floatContrast is true
if floatContrast:
summaryLine=False
if swarmYlim is None:
# get range of _selected groups_.
u = list()
for t in idx:
for i in np.unique(t):
u.append(i)
u = np.unique(u)
tempdat=data[data[x].isin(u)]
swarm_ylim=np.array([np.min(tempdat[y]), np.max(tempdat[y])])
else:
swarm_ylim=np.array([swarmYlim[0],swarmYlim[1]])
if contrastYlim is not None:
contrastYlim=np.array([contrastYlim[0],contrastYlim[1]])
    barWidth=barWidth/1000 # Not sure why we have to reduce the barwidth by this much!
if showRawData is True:
maxSwarmSpan=0.25
else:
maxSwarmSpan=barWidth
# Expand the ylim in both directions.
## Find half of the range of swarm_ylim.
swarmrange=swarm_ylim[1] -swarm_ylim[0]
pad=0.1*swarmrange
x2=np.array([swarm_ylim[0]-pad, swarm_ylim[1]+pad])
swarm_ylim=x2
# plot params
if axis_title_size is None:
axis_title_size=25
if yticksize is None:
yticksize=18
if xticksize is None:
xticksize=18
# Set clean style
sns.set(style='ticks')
axisTitleParams={'labelsize' : axis_title_size}
xtickParams={'labelsize' : xticksize}
ytickParams={'labelsize' : yticksize}
svgParams={'fonttype' : 'none'}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
rc('svg', **svgParams)
if figsize is None:
if len(tuple_in)>2:
figsize=(12,(12/np.sqrt(2)))
else:
figsize=(8,(8/np.sqrt(2)))
# Initialise figure, taking into account desired figsize.
fig=plt.figure(figsize=figsize)
# Initialise GridSpec based on `tuple_in` shape.
gsMain=gridspec.GridSpec(
1, np.shape(tuple_in)[0],
# 1 row; columns based on number of tuples in tuple.
width_ratios=widthratio,
wspace=0 )
for gsIdx, current_tuple in enumerate(tuple_in):
#### FOR EACH TUPLE IN IDX
plotdat=data[data[x].isin(current_tuple)]
plotdat[x]=plotdat[x].astype("category")
plotdat[x].cat.set_categories(
current_tuple,
ordered=True,
inplace=True)
plotdat.sort_values(by=[x])
# Drop all nans.
plotdat=plotdat.dropna()
# Calculate summaries.
summaries=plotdat.groupby([x],sort=True)[y].apply(statfunction)
if floatContrast is True:
# Use fig.add_subplot instead of plt.Subplot
ax_raw=fig.add_subplot(gsMain[gsIdx],
frame_on=False)
ax_contrast=ax_raw.twinx()
else:
# Create subGridSpec with 2 rows and 1 column.
subGridSpec=gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gsMain[gsIdx],
wspace=0)
# Use plt.Subplot instead of fig.add_subplot
ax_raw=plt.Subplot(fig,
subGridSpec[0, 0],
frame_on=False)
ax_contrast=plt.Subplot(fig,
subGridSpec[1, 0],
sharex=ax_raw,
frame_on=False)
# Calculate the boostrapped contrast
bscontrast=list()
for i in range (1, len(current_tuple)):
# Note that you start from one. No need to do auto-contrast!
tempbs=bootstrap_contrast(
data=data,
x=x,
y=y,
idx=[current_tuple[0], current_tuple[i]],
statfunction=statfunction,
smoothboot=smoothboot,
reps=reps)
bscontrast.append(tempbs)
contrastList.append(tempbs)
contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
#### PLOT RAW DATA.
if showRawData is True:
# Seaborn swarmplot doc says to set custom ylims first.
ax_raw.set_ylim(swarm_ylim)
sw=sns.swarmplot(
data=plotdat,
x=x, y=y,
order=current_tuple,
ax=ax_raw,
alpha=alpha,
palette=plotPal,
size=rawMarkerSize,
marker=rawMarkerType,
**kwargs)
if summaryBar is True:
bar_raw=sns.barplot(
x=summaries.index.tolist(),
y=summaries.values,
facecolor=summaryBarColor,
ax=ax_raw,
alpha=summaryBarAlpha)
if floatContrast:
# Get horizontal offset values.
maxXBefore=max(sw.collections[0].get_offsets().T[0])
minXAfter=min(sw.collections[1].get_offsets().T[0])
xposAfter=maxXBefore+floatSwarmSpacer
xAfterShift=minXAfter-xposAfter
# shift the swarmplots
offsetSwarmX(sw.collections[1], -xAfterShift)
## get swarm with largest span, set as max width of each barplot.
for i, bar in enumerate(bar_raw.patches):
x_width=bar.get_x()
width=bar.get_width()
centre=x_width + (width/2.)
if i == 0:
bar.set_x(centre-maxSwarmSpan/2.)
else:
bar.set_x(centre-xAfterShift-maxSwarmSpan/2.)
bar.set_width(maxSwarmSpan)
## Set the ticks locations for ax_raw.
ax_raw.xaxis.set_ticks((0, xposAfter))
firstTick=ax_raw.xaxis.get_ticklabels()[0].get_text()
secondTick=ax_raw.xaxis.get_ticklabels()[1].get_text()
ax_raw.set_xticklabels([firstTick,#+' n='+count[firstTick],
secondTick],#+' n='+count[secondTick]],
rotation=tickAngle,
horizontalalignment=tickAlignment)
if summaryLine is True:
for i, m in enumerate(summaries):
ax_raw.plot(
(i -summaryLineWidth,
i + summaryLineWidth), # x-coordinates
(m, m),
color=summaryColour,
linestyle=summaryLineStyle)
if show95CI is True:
sns.barplot(
data=plotdat,
x=x, y=y,
ax=ax_raw,
alpha=0, ci=95)
ax_raw.set_xlabel("")
if floatContrast is False:
fig.add_subplot(ax_raw)
#### PLOT CONTRAST DATA.
if len(current_tuple)==2:
# Plot the CIs on the contrast axes.
plotbootstrap(sw.collections[1],
bslist=tempbs,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
offset=floatContrast,
color=violinColor,
linewidth=1)
if floatContrast:
# Set reference lines
## First get leftmost limit of left reference group
xtemp, _=np.array(sw.collections[0].get_offsets()).T
leftxlim=xtemp.min()
## Then get leftmost limit of right test group
xtemp, _=np.array(sw.collections[1].get_offsets()).T
rightxlim=xtemp.min()
## zero line
ax_contrast.hlines(0, # y-coordinates
leftxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
## effect size line
ax_contrast.hlines(tempbs['summary'],
rightxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastEffectSizeLineStyle,
linewidth=0.75,
color=contrastEffectSizeLineColor)
## If the effect size is positive, shift the right axis up.
if float(tempbs['summary'])>0:
rightmin=ax_raw.get_ylim()[0] -float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] -float(tempbs['summary'])
## If the effect size is negative, shift the right axis down.
elif float(tempbs['summary'])<0:
rightmin=ax_raw.get_ylim()[0] + float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] + float(tempbs['summary'])
ax_contrast.set_ylim(rightmin, rightmax)
if gsIdx>0:
ax_contrast.set_ylabel('')
align_yaxis(ax_raw, tempbs['statistic_ref'], ax_contrast, 0.)
else:
# Set bottom axes ybounds
if contrastYlim is not None:
ax_contrast.set_ylim(contrastYlim)
# Set xlims so everything is properly visible!
swarm_xbounds=ax_raw.get_xbound()
ax_contrast.set_xbound(swarm_xbounds[0] -(summaryLineWidth * 1.1),
swarm_xbounds[1] + (summaryLineWidth * 1.1))
else:
# Plot the CIs on the bottom axes.
plotbootstrap_hubspoke(
bslist=bscontrast,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
linewidth=lineWidth)
if floatContrast is False:
fig.add_subplot(ax_contrast)
if gsIdx>0:
ax_raw.set_ylabel('')
ax_contrast.set_ylabel('')
# Turn contrastList into a pandas DataFrame,
contrastList=pd.DataFrame(contrastList).T
contrastList.columns=contrastListNames
########
axesCount=len(fig.get_axes())
## Loop thru SWARM axes for aesthetic touchups.
for i in range(0, axesCount, 2):
axx=fig.axes[i]
if i!=axesCount-2 and 'hue' in kwargs:
# If this is not the final swarmplot, remove the hue legend.
axx.legend().set_visible(False)
if floatContrast is False:
axx.xaxis.set_visible(False)
sns.despine(ax=axx, trim=True, bottom=False, left=False)
else:
sns.despine(ax=axx, trim=True, bottom=True, left=True)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(showAllYAxes)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
# Add zero reference line for swarmplots with bars.
if summaryBar is True:
axx.add_artist(Line2D(
(axx.xaxis.get_view_interval()[0],
axx.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
# I don't know why the swarm axes controls the contrast axes ticks....
if showGroupCount:
count=data.groupby(x).count()[y]
newticks=list()
for ix, t in enumerate(axx.xaxis.get_ticklabels()):
t_text=t.get_text()
nt=t_text+' n='+str(count[t_text])
newticks.append(nt)
axx.xaxis.set_ticklabels(newticks)
if legend is False:
axx.legend().set_visible(False)
else:
if i==axesCount-2: # the last (rightmost) swarm axes.
                axx.legend(loc='upper right',
bbox_to_anchor=(1.1,1.0),
fontsize=legendFontSize,
**legendFontProps)
## Loop thru the CONTRAST axes and perform aesthetic touch-ups.
## Get the y-limits:
for j,i in enumerate(range(1, axesCount, 2)):
axx=fig.get_axes()[i]
if floatContrast is False:
xleft, xright=axx.xaxis.get_view_interval()
# Draw zero reference line.
axx.hlines(y=0,
xmin=xleft-1,
xmax=xright+1,
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
# reset view interval.
axx.set_xlim(xleft, xright)
# # Draw back x-axis lines connecting ticks.
# drawback_x(axx)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(False)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
sns.despine(ax=axx,
top=True, right=True,
left=False, bottom=False,
trim=True)
# Rotate tick labels.
rotateTicks(axx,tickAngle,tickAlignment)
else:
# Re-draw the floating axis to the correct limits.
lower=np.min(contrastList.ix['diffarray',j])
upper=np.max(contrastList.ix['diffarray',j])
meandiff=contrastList.ix['summary', j]
## Make sure we have zero in the limits.
if lower>0:
lower=0.
if upper<0:
upper=0.
## Get the tick interval from the left y-axis.
leftticks=fig.get_axes()[i-1].get_yticks()
tickstep=leftticks[1] -leftticks[0]
## First re-draw of axis with new tick interval
axx.yaxis.set_major_locator(MultipleLocator(base=tickstep))
newticks1=axx.get_yticks()
## Obtain major ticks that comfortably encompass lower and upper.
newticks2=list()
for a,b in enumerate(newticks1):
if (b >= lower and b <= upper):
# if the tick lies within upper and lower, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2)<meandiff:
ind=np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff<np.min(newticks2):
ind=np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
newticks2=np.array(newticks2)
newticks2.sort()
## Second re-draw of axis to shrink it to desired limits.
axx.yaxis.set_major_locator(FixedLocator(locs=newticks2))
## Despine the axes.
sns.despine(ax=axx, trim=True,
bottom=False, right=False,
left=True, top=True)
# Normalize bottom/right Contrast axes to each other for Cummings hub-and-spoke plots.
if (axesCount>2 and
contrastShareY is True and
floatContrast is False):
# Set contrast ylim as max ticks of leftmost swarm axes.
if contrastYlim is None:
lower=list()
upper=list()
for c in range(0,len(contrastList.columns)):
lower.append( np.min(contrastList.ix['bca_ci_low',c]) )
upper.append( np.max(contrastList.ix['bca_ci_high',c]) )
lower=np.min(lower)
upper=np.max(upper)
else:
lower=contrastYlim[0]
upper=contrastYlim[1]
normalizeContrastY(fig,
contrast_ylim = contrastYlim,
show_all_yaxes = showAllYAxes)
# if (axesCount==2 and
# floatContrast is False):
# drawback_x(fig.get_axes()[1])
# drawback_y(fig.get_axes()[1])
# if swarmShareY is False:
# for i in range(0, axesCount, 2):
# drawback_y(fig.get_axes()[i])
# if contrastShareY is False:
# for i in range(1, axesCount, 2):
# if floatContrast is True:
# sns.despine(ax=fig.get_axes()[i],
# top=True, right=False, left=True, bottom=True,
# trim=True)
# else:
# sns.despine(ax=fig.get_axes()[i], trim=True)
# Zero gaps between plots on the same row, if floatContrast is False
if (floatContrast is False and showAllYAxes is False):
gsMain.update(wspace=0.)
else:
# Tight Layout!
gsMain.tight_layout(fig)
# And we're all done.
rcdefaults() # restore matplotlib defaults.
sns.set() # restore seaborn defaults.
return fig, contrastList
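# --- Editor's hedged sketch (not part of the original sandbox module) ---
# A minimal end-to-end call of `contrastplot_test` on a toy DataFrame; the
# helper name `_example_contrastplot` and the column names are hypothetical,
# and the function is never called by this module.
def _example_contrastplot():
    np.random.seed(54321)
    df = pd.DataFrame({
        'Group': np.repeat(['Control', 'Treatment'], 50),
        'Value': np.concatenate([np.random.normal(0., 1., 50),
                                 np.random.normal(0.5, 1., 50)])})
    fig, contrasts = contrastplot_test(data=df, x='Group', y='Value',
                                       idx=('Control', 'Treatment'),
                                       reps=500, floatContrast=True)
    return fig, contrasts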
| mit |