repo_name | path | copies | size | content | license
---|---|---|---|---|---|
napjon/moocs_solution
|
Data_Science/project_3/plot_histogram/plot_histogram.py
|
1
|
1718
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def entries_histogram(turnstile_weather):
'''
Before we perform any analysis, it might be useful to take a
look at the data we're hoping to analyze. More specifically, let's
examine the hourly entries in our NYC subway data and determine what
distribution the data follows. This data is stored in a dataframe
called turnstile_weather under the ['ENTRIESn_hourly'] column.
Why don't you plot two histograms on the same axes, showing hourly
entries when raining vs. when not raining. Here's an example on how
to plot histograms with pandas and matplotlib:
turnstile_weather['column_to_graph'].hist()
Your histogram may look similar to the following graph:
http://i.imgur.com/9TrkKal.png
You can read a bit about using matplotlib and pandas to plot
histograms:
http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
You can look at the information contained within the turnstile weather data at the link below:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
'''
plt.figure()
turnstile_weather['ENTRIESn_hourly'][turnstile_weather.rain == 1].hist() # your code here to plot a histogram for hourly entries when it is raining
turnstile_weather['ENTRIESn_hourly'][turnstile_weather.rain == 0].hist() # your code here to plot a histogram for hourly entries when it is not raining
return plt
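# A minimal optional sketch (same dataframe layout assumed; the helper name is
# illustrative only) of the same two histograms with explicit bins, labels and
# a legend, which makes the rain vs. no-rain distributions easier to compare.
def entries_histogram_labeled(turnstile_weather, bins=50, max_entries=6000):
    plt.figure()
    rain = turnstile_weather['ENTRIESn_hourly'][turnstile_weather.rain == 1]
    no_rain = turnstile_weather['ENTRIESn_hourly'][turnstile_weather.rain == 0]
    no_rain.hist(bins=bins, range=(0, max_entries), label='no rain')
    rain.hist(bins=bins, range=(0, max_entries), label='rain')
    plt.xlabel('ENTRIESn_hourly')
    plt.ylabel('Frequency')
    plt.legend()
    return plt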
if __name__ == "__main__":
image = "plot.png"
turnstile_weather = pd.read_csv("turnstile_data_master_with_weather.csv")
plt = entries_histogram(turnstile_weather)
plt.savefig(image)
|
mit
|
lpawluczuk/summar.pl
|
Summarizer/trainer.py
|
1
|
7451
|
# -*- coding: utf-8 -*-
import os
import argparse
from helpers import save_to_file, load_from_file, normalize
from polish_summaries_corpus_reader import read_psc_file
from summarization.summarizer import Summarizer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import math
import codecs
import numpy as np
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
class AbstractTrainer(object):
"""Some description that tells you it's abstract,
often listing the methods you're expected to supply."""
def train(self, summarizer, dataset, testset, model_path):
raise NotImplementedError("Should have implemented this")
class TrainerWrapper():
def __init__(self, features, config):
self.train_method = config.get_train_method()
self.process_dir = config.get_dir_processor()
self.summarizer = Summarizer.Instance()
self.summarizer.set_features(features, config.get_stop_list_path())
self.features = features
def train(self, train_dir, test_dir, dataset_path=None, dump_dataset=True):
testset = SupervisedDataSet(len(self.summarizer.get_features()), 1)
min_maxs = [[100, 0] for i in range(len(self.summarizer.get_features()))]
if dataset_path and dataset_path != 'None':
dataset = load_from_file(dataset_path)
min_maxs = load_from_file("meta_model.xml") # sprawidzć ścieżke!
else:
dataset = SupervisedDataSet(len(self.summarizer.get_features()), 1)
for root, dirs, files in os.walk(train_dir, topdown=False):
for file_ds in self.process_dir(self.summarizer, root, files):
for ds in file_ds:
dataset.addSample(ds[0], ds[1])
min_maxs = self.update_min_maxs(min_maxs, ds[0])
# break # remove this !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# print min_maxs
inp = []
for d in dataset['input']:
inp.append([normalize(val, min_maxs[i][0], min_maxs[i][1]) for i, val in enumerate(d)])
dataset.setField("input", inp)
# print dataset['input']
### TEMP
# save_dataset_as_csv(dataset)
if dump_dataset:
save_to_file(dataset, "dataset.xml")
if test_dir:
for root, dirs, files in os.walk(test_dir, topdown=False):
for file_ds in self.process_dir(self.summarizer, root, files):
for ds in file_ds:
testset.addSample(ds[0], ds[1])
print "[Trainer] -> training..."
save_to_file(min_maxs, self.features.replace("features.txt", "meta_model.xml"))
self.train_method(self.summarizer, dataset, testset, self.features.replace("features.txt", "model.xml"))
def update_min_maxs(self, min_maxs, ds):
for i, d in enumerate(ds):
if min_maxs[i][0] > d:
min_maxs[i][0] = d
if min_maxs[i][1] < d:
min_maxs[i][1] = d
return min_maxs
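# The `normalize` helper imported from helpers above is assumed to do plain
# min-max scaling using the per-feature minima/maxima tracked in min_maxs; a
# minimal sketch of that assumption (hypothetical name so it does not shadow
# the real helper):
def min_max_normalize_sketch(val, min_val, max_val):
    """Scale val into [0, 1] given the observed feature minimum and maximum."""
    if max_val == min_val:
        return 0.0
    return float(val - min_val) / (max_val - min_val)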
class NeuralNetworkTrainer(AbstractTrainer):
def train(self, summarizer, dataset, testset, model_path):
net = buildNetwork(len(summarizer.get_features()), 5, 5, 5, 1, bias=True) #, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, dataset, learningrate=0.001, momentum=0.99)
# trainer.trainUntilConvergence()
if testset:
errors = trainer.trainUntilConvergence(verbose=True,
trainingData=dataset,
validationData=testset,
maxEpochs=10)
else:
errors = trainer.trainUntilConvergence(verbose=True,
trainingData=dataset,
validationProportion=0.1,
maxEpochs=10)
print "[Trainer] -> training done."
print errors
save_to_file(net, model_path)
print "[Trainer] -> model save to model.xml file."
class SVMTrainer(AbstractTrainer):
def train(self, summarizer, dataset, testset, model_path):
clf = SVC(cache_size=5000, verbose=True, C=10, gamma=0.2)
print clf.fit(dataset['input'], dataset['target'])
save_to_file(clf, model_path)
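# GridSearchCV and StratifiedKFold are imported above but never used; a hedged
# sketch of how they could tune the SVC hyperparameters. The class name and the
# parameter grid are illustrative assumptions, and the targets are assumed to
# hold discrete 0/1 labels, as in SVMTrainer.
class GridSearchSVMTrainer(AbstractTrainer):
    def train(self, summarizer, dataset, testset, model_path):
        labels = [t[0] for t in dataset['target']]
        param_grid = {"C": [1, 10, 100], "gamma": [0.01, 0.1, 0.2]}
        folds = StratifiedKFold(labels, n_folds=3)
        search = GridSearchCV(SVC(cache_size=5000), param_grid, cv=folds)
        search.fit(dataset['input'], labels)
        print "[Trainer] -> best params:", search.best_params_
        save_to_file(search.best_estimator_, model_path)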
def save_dataset_as_csv(dataset):
file = codecs.open("dataset.csv", "w", "utf-8")
for input, target in zip(dataset['input'], dataset['target']):
file.write(";".join([str(i) for i in input]) + ";" + str(target[0]) + "\n")
def get_summary(root, files, not_summary=False):
previous_document = None
for i, name in enumerate(files):
print "[Trainer] -> file: %s" % name
doc_psc = read_psc_file(os.path.join(root, name), previous_document)
previous_document = doc_psc
# print "\n".join(unicode(s[1]) for s in doc_psc.get_not_summary(0))
result = doc_psc.get_not_summary(0) if not_summary else doc_psc.get_summary(0)
file = codecs.open("result.txt", "a", "utf-8")
file.write("\n".join(unicode(s[1]) for s in result))
file.write("\nEOF\n")
def print_summaries(train_dir, not_summary=False):
for root, dirs, files in os.walk(train_dir, topdown=False):
get_summary(root, files, not_summary)
############################
def check_summaries_overlap(train_dir):
overlap = []
var = []
for root, dirs, files in os.walk(train_dir, topdown=False):
previous_document = None
summaries = []
for i, name in enumerate(files):
print "[Trainer] -> file: %s" % name
doc_psc = read_psc_file(os.path.join(root, name), previous_document)
previous_document = doc_psc
summaries.append(doc_psc.summaries[0])
intersections = []
for i in range(len(summaries)):
for j in range(len(summaries)):
if j <= i:
continue
intersections.append(float(len(summaries[i].intersection(summaries[j]))) / len(summaries[i]))
if len(intersections) == 0:
print "Skipping"
continue
o = sum(intersections) / len(intersections)
v = sum(math.pow(i - o, 2) for i in intersections) / len(intersections)
print "Average: ", o
print "Var: ", v
overlap.append(o)
var.append(v)
final_o = sum(overlap) / len(overlap)
final_v = sum(var) / len(var)
print "AVERAGE OVERLAP: %s, AVERAGE VAR: %s" % (final_o, final_v)
def main(mode, files):
if mode == "CHECK":
check_summaries_overlap(files)
elif mode == "PRINT":
print_summaries(files)
elif mode == "PRINT_NOT":
print_summaries(files, True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", choices=["CHECK", "PRINT", "PRINT_NOT"],
help="program mode", required=True)
parser.add_argument("-f", "--files", help="files to process")
args = parser.parse_args()
main(args.mode, args.files if args.files else "/home/lukasz/Projects/PracaMagisterska/Inne/datasets/trainset/")
|
mit
|
ikiskin/CNN
|
Humbug/wavelet_nn.py
|
1
|
8113
|
#import audacity
import numpy as np
import sys
import os, os.path
from scipy.io.wavfile import read, write
from scipy import signal
from scipy import nanmean
import csv
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
from collections import Counter
# Keras-related imports
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution1D, MaxPooling2D, Convolution2D
from keras import backend as K
K.set_image_dim_ordering('th')
from keras.callbacks import ModelCheckpoint
from keras.callbacks import RemoteMonitor
from keras.models import load_model
# Data post-processing and analysis
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score, roc_curve
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
#plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
################################################################################################################################
## Load data
loaded_data = np.load('HumBug/Outputs/humbug_MLP_wavelet_80_10_10_most_positive_interp.npz')
print 'Files available to load:', loaded_data.files
x_train = loaded_data['x_train']
y_train = loaded_data['y_train']
x_test = loaded_data['x_test']
y_test = loaded_data['y_test']
x_train_caged = x_train  # the npz loaded above already holds the wavelet features
y_train_caged = y_train
# NN parameters
conv = False
# number of convolutional filters
nb_filters = 16
# size of pooling area for max pooling
pool_size = (2,2)
# convolution kernel size
kernel_size_1 = (spec_window,spec_window)  # spec_window must be supplied by the feature-extraction settings (not defined in this script)
kernel_size_2 = (3,3)
# number of classes
nb_classes = 2
# Initialise model
model = Sequential()
# Fully connected first layer to replace conv layer
n_hidden = 32 # number of units in the first fully connected (Dense) layer
model.add(Dense(n_hidden, input_dim=np.shape(x_train_caged)[1]))
# model.add(Convolution2D(nb_filters, kernel_size_1[0], kernel_size_1[1],
# border_mode = 'valid',
# input_shape = input_shape))
# convout1 = Activation('relu')
# model.add(convout1)
# model.add(Convolution2D(nb_filters, kernel_size_2[0], kernel_size_2[1]))
# convout2 = Activation('relu')
# model.add(convout2)
# model.add(MaxPooling2D(pool_size = pool_size))
# model.add(Dropout(0.25))
#model.add(Flatten())
model.add(Activation('relu'))
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
################################################################################################################################
# Train model
#y_test = y_test_spec
#x_test = x_test_spec
#x_train_caged = x_train
#y_train_caged = y_train_wav
#input_shape = (1, x_train.shape[2], 10)
# # Reshape data for MLP
if not conv:
x_train_caged = x_train.reshape(x_train.shape[0], x_train.shape[-2]*x_train.shape[-1])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[-2]*x_test.shape[-1])
y_train_caged = y_train
batch_size = 64
nb_epoch = 200
# filepath = "weights-improvement.hdf5"
# #filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
# # Options to save best performing iteration according to monitor = ?. E.g. 'val_acc' will save the run with the highest
# # validation accuracy.
# checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
# callbacks_list = [checkpoint]
# remote = RemoteMonitor(root='http://localhost:9000') # For viewing accuracy measures during training. Experimental.
print np.shape(x_train_caged)
Weights = np.zeros([nb_epoch,np.shape(x_train_caged)[1],n_hidden])
for i in range(nb_epoch):
print 'Epoch number', i+1, 'of', nb_epoch
model.fit(x_train_caged, y_train_caged, batch_size=batch_size, nb_epoch=1,
verbose=2)
W = model.layers[0].W.get_value(borrow=True)
Weights[i,:,:] = W
#model.fit(x_train_caged, y_train_caged, batch_size=batch_size, nb_epoch=nb_epoch,
# verbose=2)#,callbacks= [remote]) ## validation_data=(X_test_set, Y_test)
#RemoteMonitor(root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
# Set file name for wavelet
base_name = 'humbug'
suffix_2 = 'wavelet'
if conv:
suffix = 'conv'
else:
suffix = 'MLP'
# scales, count_method and binning_method are expected from the wavelet preprocessing configuration
model_name = (base_name + '_' + suffix + '_' + suffix_2 + '_' + str(len(scales)) + '_'
+ str(kernel_size_1[0]) + '_' + str(kernel_size_1[0]) + '_' + count_method + '_' + binning_method)
print model_name
score = model.evaluate(x_test, y_test, verbose=1)
predictions = model.predict(x_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
########### 2 class predictions #####################################################
positive_predictions = predictions[:,0][np.where(y_test[:,0])]
negative_predictions = predictions[:,1][np.where(y_test[:,1])]
true_positive_rate = (sum(np.round(positive_predictions)))/sum(y_test[:,0])
true_negative_rate = sum(np.round(negative_predictions))/sum(y_test[:,1])
figs = []
f = plt.figure(figsize = (15,6))
plt.plot(predictions[:,0],'g.', markersize = 2, label = 'y_pred_positive')
plt.plot(y_test[:,0], '--b', linewidth = 0.5, markersize = 2, label = 'y_test_positive')
plt.legend(loc = 7)
plt.ylim([-0.2,1.2])
plt.ylabel('Softmax output')
plt.xlabel('Signal window number')
figs.append(f)
print 'True positive rate', true_positive_rate, 'True negative rate', true_negative_rate
#plt.savefig('Outputs/' + 'ClassOutput_' + model_name + '.pdf', transparent = True)
#print 'saved as', 'ClassOutput_' + model_name + '.pdf'
#plt.show()
cnf_matrix = confusion_matrix(y_test[:,1], np.round(predictions[:,1]).astype(int))
class_names = ('Mozz','No mozz')
# Plot normalized confusion matrix
f, axs = plt.subplots(1,2,figsize=(12,6))
#plt.figure(figsize = (4,4))
#plt.subplot(1,2,1)
#plt.figure(figsize = (4,4))
conf_m = plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix',)
#plt.savefig('Outputs/' + 'Conf_' + model_name + '.pdf', transparent = True)
#print 'saved as', 'Conf_' + model_name + '.pdf'
y_true = y_test[:,0]
y_score = predictions[:,0]
roc_score = roc_auc_score(y_true, y_score)
fpr, tpr, thresholds = roc_curve(y_true, y_score)
#plt.subplot(1,2,2)
#plt.figure(figsize=(4,4))
axs[0].plot(fpr, tpr, '.-')
axs[0].plot([0,1],[0,1],'k--')
axs[0].set_xlim([-0.01, 1.01])
axs[0].set_ylim([-0.01, 1.01])
axs[0].set_xlabel('False positive rate')
axs[0].set_ylabel('True positive rate')
axs[0].set_title('ROC, area = %.4f'%roc_score)
#plt.savefig('Outputs/' + 'ROC_' + model_name + '.pdf')
#print 'saved as', 'ROC_' + model_name + '.pdf'
#plt.show()
figs.append(f)
pdf = matplotlib.backends.backend_pdf.PdfPages('Outputs/' + model_name + '.pdf')
for i in range(2):
pdf.savefig(figs[i])
pdf.close()
print 'saved as ' + model_name + '.pdf'
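# The per-epoch first-layer weights collected in `Weights` above are only held
# in memory; a minimal sketch of persisting them alongside the PDF report (the
# output file name is an illustrative assumption).
np.savez_compressed('Outputs/' + model_name + '_layer0_weights.npz', Weights=Weights)
print 'saved first-layer weight history for', nb_epoch, 'epochs'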
|
gpl-3.0
|
jjx02230808/project0223
|
sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# this many iterations are needed for the expected values below
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
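# Minimal usage sketch (not one of the upstream tests): fit an RBM on a slice of
# the digits data and check the shape of the hidden representation it produces.
def test_transform_shape_sketch():
    rbm = BernoulliRBM(n_components=8, n_iter=2, random_state=0)
    H = rbm.fit_transform(Xdigits[:50])
    assert_true(H.shape == (50, 8))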
|
bsd-3-clause
|
DSLituiev/scikit-learn
|
sklearn/decomposition/tests/test_truncated_svd.py
|
73
|
6086
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
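# Minimal usage sketch (not an upstream test): reduce the tf-idf-like X above to
# a handful of components and report how much of the variance they retain.
def example_lsa_sketch():
    tsvd = TruncatedSVD(n_components=5, random_state=0)
    X_reduced = tsvd.fit_transform(X)
    print("reduced shape:", X_reduced.shape)
    print("variance retained:", tsvd.explained_variance_ratio_.sum())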
|
bsd-3-clause
|
jaidevd/scikit-learn
|
examples/model_selection/plot_train_error_vs_test_error.py
|
349
|
2577
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the performance
on the test set is optimal within a range of values of the regularization
parameter. This example uses an Elastic-Net regression model, and the
performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
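# The `score` reported above is R^2; a minimal sketch computing the same
# quantity explicitly at the optimal alpha with sklearn.metrics.r2_score.
from sklearn.metrics import r2_score
enet.set_params(alpha=alpha_optim)
enet.fit(X_train, y_train)
print("R^2 on test data at optimal alpha: %.3f"
      % r2_score(y_test, enet.predict(X_test)))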
|
bsd-3-clause
|
grocsvs/grocsvs
|
src/grocsvs/stages/cluster_svs.py
|
1
|
5006
|
import numpy
import os
import pandas
from grocsvs import step
from grocsvs import graphing
from grocsvs.stages import refine_grid_search_breakpoints
pandas.options.display.width = 150
class ClusterSVsStep(step.StepChunk):
"""
Takes the matrix of barcode overlaps and p-values and finds clusters
of significant window-pairs
Output files:
breakpoints.tsv, edges.tsv - tables of the clustered breakpoint candidates and graph edges
graphs - visualizations of the resulting breakpoint graphs
"""
@staticmethod
def get_steps(options):
yield ClusterSVsStep(options)
def __init__(self, options):
self.options = options
def __str__(self):
return self.__class__.__name__
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
paths = {
"breakpoints": os.path.join(directory, "breakpoints.tsv"),
"edges": os.path.join(directory, "edges.tsv"),
"graphs": os.path.join(directory, "graphs")
}
return paths
def run(self):
outpaths = self.outpaths(final=False)
self.logger.log("Loading data...")
self.evidence, short_frag_support, mate_pair_support = load_evidence(
self.options)
self.evidence["p"] = self.evidence["p_resampling"]
self.evidence = precluster(self.evidence)
# self.evidence = add_facing(self.evidence)
print self.evidence
self.logger.log("Building graph, pruning...")
graph = graphing.build_graph(self.evidence)#common_counts, total_counts, binom_ps, events)
graph = self.prune_graph(graph)
graphs = graphing.get_subgraphs(graph)
print "graphs:", len(graphs)
total_breakpoints = 0
for graph in graphs:
total_breakpoints += sum(1 for n1,n2,data in graph.edges(data=True) if data["kind"]=="breakpoint")
print total_breakpoints
graphing.visualize_graphs(outpaths["graphs"], graphs, self.evidence)
table = graphing.graphs_to_table(graphs)
table.to_csv(outpaths["edges"], sep="\t", index=False)
table.loc[table["kind"]=="breakpoint"].to_csv(
outpaths["breakpoints"], sep="\t", index=False)
def prune_graph(self, graph):
pruned = graphing.pick_best_edges(graph)
fixed = graphing.fix_breakpoints(self.options, pruned)
cleaned = graphing.cleanup_fixed_graph(fixed)
return cleaned
def load_evidence(options):
short_frag_support = []
mate_pair_support = []
path = refine_grid_search_breakpoints.CombineRefinedBreakpointsStep(options)\
.outpaths(final=True)["refined_pairs"]
evidence = pandas.read_table(path)
evidence["chromx"] = evidence["chromx"].astype("string")
evidence["chromy"] = evidence["chromy"].astype("string")
evidence["p"] = evidence["p_resampling"]
return evidence, short_frag_support, mate_pair_support
def get_positions(evidence, which="new", with_orientation=False):
"""
gets a list of all (chrom,pos) pairs from x and y coords
"""
x = evidence[["chromx", "{}_x".format(which)]].copy()
x.columns = ["chrom", "pos"]
y = evidence[["chromy", "{}_y".format(which)]].copy()
y.columns = ["chrom", "pos"]
if with_orientation:
x["orientation"] = evidence["orientation"].str[0]
y["orientation"] = evidence["orientation"].str[1]
positions = pandas.concat([x,y], ignore_index=True)\
.sort_values(["chrom","pos"])\
.drop_duplicates()\
.reset_index(drop=True)
return positions
def mean_(vals):
return int(numpy.mean(vals))
def precluster(evidence, distance=3000):
"""
we need to cluster putative breakpoints in 1d space so that the graphs
can be built properly
"""
positions = get_positions(evidence)
transforms = []
for chrom, p in positions.groupby("chrom"):
p = p.copy()
p["group"] = ((p["pos"]-p["pos"].shift()) > distance).cumsum()
p["clustered"] = p.groupby("group")["pos"].transform(mean_)
transforms.append(p[["chrom","pos","clustered"]])
transform = pandas.concat(transforms)
cur_evidence = evidence.copy()
cur_evidence = pandas.merge(cur_evidence, transform[["chrom", "pos", "clustered"]],
left_on=["chromx", "new_x"], right_on=["chrom","pos"], how="left")\
.drop(["chrom", "pos"], axis=1)
cur_evidence = cur_evidence.rename(columns={"clustered": "clustered_x"})
cur_evidence = pandas.merge(cur_evidence, transform[["chrom", "pos", "clustered"]],
left_on=["chromy", "new_y"], right_on=["chrom","pos"], how="left")\
.drop(["chrom", "pos"], axis=1)
cur_evidence = cur_evidence.rename(columns={"clustered": "clustered_y"})
cur_evidence
return cur_evidence
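# Minimal sketch of the grouping trick used in precluster(): positions closer
# than `distance` share a cumulative-sum group id and are replaced by the
# group mean (toy data, illustrative only).
def precluster_example():
    p = pandas.DataFrame({"chrom": ["1"] * 4, "pos": [100, 1200, 10000, 11500]})
    p["group"] = ((p["pos"] - p["pos"].shift()) > 3000).cumsum()
    p["clustered"] = p.groupby("group")["pos"].transform(mean_)
    print p
    # 100 and 1200 collapse to 650; 10000 and 11500 collapse to 10750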
|
mit
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/matplotlib/patches.py
|
1
|
147871
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_fill(fill)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if six.callable(self._contains):
return self._contains(self, mouseevent)
if radius is None:
radius = self.get_linewidth()
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
if radius is None:
radius = self.get_linewidth()
return self.get_path().contains_point(point,
self.get_transform(),
radius)
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
self.set_edgecolor(other.get_edgecolor())
self.set_facecolor(other.get_facecolor())
self.set_fill(other.get_fill())
self.set_hatch(other.get_hatch())
self.set_linewidth(other.get_linewidth())
self.set_linestyle(other.get_linestyle())
self.set_transform(other.get_data_transform())
self.set_figure(other.get_figure())
self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
self.stale = True
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.edgecolor']
self._original_edgecolor = color
self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)
self.stale = True
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.facecolor']
# save: otherwise changing _fill may lose alpha information
self._original_facecolor = color
self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)
if not self._fill:
self._facecolor = list(self._facecolor)
self._facecolor[3] = 0
self.stale = True
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha transparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
# using self._fill and self._alpha
self.set_facecolor(self._original_facecolor)
self.set_edgecolor(self._original_edgecolor)
self.stale = True
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
self._linewidth = float(w)
self.stale = True
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink
in points.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) |
``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
Parameters
----------
ls : { '-', '--', '-.', ':'} and more see description
The line style.
"""
if ls is None:
ls = "solid"
ls = cbook.ls_mapper.get(ls, ls)
self._linestyle = ls
self.stale = True
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self.set_facecolor(self._original_facecolor)
self.stale = True
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,) +
'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
self.stale = True
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,) +
'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
self.stale = True
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
self.stale = False
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.colorConverter.to_rgba(
self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = float(xy[0])
self._y = float(xy[1])
self._width = float(width)
self._height = float(height)
self._angle = float(angle)
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
Return the vertices of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def contains(self, mouseevent):
# special case the degenerate rectangle
if self._width == 0 or self._height == 0:
return False, {}
x, y = self.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
self.stale = True
def set_width(self, w):
"""
Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
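# Minimal usage sketch (not part of matplotlib itself): a Rectangle exercising
# the Patch kwargs documented above (facecolor, edgecolor, hatch, linestyle);
# the helper name is illustrative only.
def _example_rectangle_patch():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    rect = Rectangle((0.2, 0.2), 0.5, 0.3, facecolor='lightblue',
                     edgecolor='black', hatch='//', linestyle='dashed')
    ax.add_patch(rect)
    return fig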
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
Returns if the polygon is closed
Returns
-------
closed : bool
If the path is closed
"""
return self._closed
def set_closed(self, closed):
"""
Set if the polygon is closed
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
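# Illustrative sketch (never called by the module itself): a full wedge next
# to a partial annulus built with the *width* argument.  Coordinates and
# colors are arbitrary demonstration values.
def _example_wedge_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # A filled 90-degree wedge of radius 0.3 ...
    ax.add_patch(Wedge((0.3, 0.5), 0.3, 0, 90, facecolor='tan'))
    # ... and an annular wedge from inner radius r - width to outer radius r.
    ax.add_patch(Wedge((0.7, 0.5), 0.25, 30, 300, width=0.1,
                       facecolor='slateblue'))
    ax.set_aspect('equal')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig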
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
Draws an arrow, starting at (*x*, *y*), direction and length
        given by (*dx*, *dy*). The width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.hypot(dx, dy)
if L != 0:
cx = float(dx) / L
sx = float(dy) / L
else:
# Account for division by zero
cx, sx = 0, 1
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
        *head_width*: float or None (default: 20*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
"""
if head_width is None:
head_width = 20 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.hypot(dx, dy)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
# if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
# if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
# figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
if distance != 0:
cx = float(dx) / distance
sx = float(dy) / distance
else:
#Account for division by zero
cx, sx = 0, 1
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
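# Illustrative sketch (not used by the module itself): drawing a FancyArrow
# whose head is included in the total length.  All numeric values below are
# demonstration assumptions.
def _example_fancyarrow_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    arrow = FancyArrow(0.1, 0.5, 0.7, 0.0, width=0.02,
                       head_width=0.08, head_length=0.1,
                       length_includes_head=True, color='darkred')
    ax.add_patch(arrow)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig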
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
                 makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
        Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self, ev):
if ev.x is None or ev.y is None:
return False, {}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x * x + y * y) <= 1.0, {}
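# Illustrative sketch (kept out of the import path): a rotated Ellipse outline
# added to an Axes.  The geometry and colors are demonstration assumptions.
def _example_ellipse_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ell = Ellipse((0.5, 0.5), width=0.6, height=0.3, angle=30,
                  facecolor='none', edgecolor='teal')
    ax.add_patch(ell)
    ax.set_aspect('equal')
    return fig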
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
self.radius = radius
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
self.stale = True
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
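# Illustrative sketch (not executed at import time): the spline-based Circle
# next to its polygonal approximation, CirclePolygon.  Radii, positions and
# the coarse resolution are demonstration assumptions.
def _example_circle_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # True circle drawn with Bezier splines.
    ax.add_patch(Circle((0.3, 0.5), radius=0.2, fill=False, edgecolor='black'))
    # Coarse polygonal approximation with only 8 sides.
    ax.add_patch(CirclePolygon((0.7, 0.5), radius=0.2, resolution=8,
                               fill=False, edgecolor='gray'))
    ax.set_aspect('equal')
    return fig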
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
# self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
                self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
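# Illustrative sketch (left uncalled): an unfilled elliptical Arc.  An Arc must
# be added to an Axes before it is drawn and it cannot be filled; the numbers
# below are demonstration assumptions.
def _example_arc_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    arc = Arc((0.5, 0.5), width=0.8, height=0.4, angle=20,
              theta1=0, theta2=270, edgecolor='purple')
    ax.add_patch(arc)
    ax.set_aspect('equal')
    return fig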
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
if six.PY2:
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefs,
annotations) = inspect.getfullargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
# adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a string rep of the list of keys.
Used to update the documentation.
"""
styles = "[ \'"
styles += "\' | \'".join(str(i) for i in sorted(_styles.keys()))
styles += "\' ]"
return styles
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
        # a dictionary of stylename, style class pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
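# Illustrative sketch (not invoked by the module): the three equivalent ways a
# style container such as BoxStyle resolves a style, following _Style.__new__
# above.  BoxStyle is defined further below, so the names are only resolved
# when the helper is actually called.
def _example_style_lookup():
    # Keyword form, string form with keyword, and the single-string form all
    # yield an instance of BoxStyle.Round with pad=0.2.
    a = BoxStyle.Round(pad=0.2)
    b = BoxStyle("Round", pad=0.2)
    c = BoxStyle("Round, pad=0.2")
    return a, b, c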
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
    boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *aspect_ratio* determines the aspect-ratio of
    the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
            initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the very core of the
            :class:`BboxTransmuter` class and must be overridden in the
            subclasses. It receives the location and size of the
            rectangle, and the mutation_size, with which the amount of
            padding etc. will be scaled. It returns a
            :class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect-ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class DArrow(_Base):
"""
(Double) Arrow Box
"""
# This source is copied from LArrow,
# modified to add a right arrow to the bbox.
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.DArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
# The width is padded by the arrows, so we don't need to pad it.
height = height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0)/2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), # bot-segment
(x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
(x1, y1 + dxx), # right-arrow
(x1, y1), (x0 + dxx, y1), # top-segment
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # left-arrow
(x0 + dxx, y0), (x0 + dxx, y0)] # close-poly
com = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list['darrow'] = DArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, height + 2. * pad
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
              rounding size of edges. *pad*/2 if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # rounding size. Use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = (width + 2. * pad - 2 * dr,
height + 2. * pad - 2 * dr)
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth. *pad*/2 if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = (width + 2. * pad - tooth_size,
height + 2. * pad - tooth_size)
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
list(zip(right_saw_x, right_saw_y)) +
list(zip(top_saw_x, top_saw_y)) +
list(zip(left_saw_x, left_saw_y)) +
[(bottom_saw_x[0], bottom_saw_y[0])])
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth. *pad*/2 if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))
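# Illustrative sketch (module-private, uncalled): a BoxStyle instance is a
# callable that mutates a rectangle into a fancy-box Path.  The rectangle
# geometry and mutation size below are demonstration assumptions.
def _example_boxstyle_path():
    style = BoxStyle("round", pad=0.3)
    # x0, y0, width, height, mutation_size -> matplotlib.path.Path
    return style(0.0, 0.0, 1.0, 0.5, mutation_size=1.0)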
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
can be a string of the style name with a comma separated
attribute, or an instance of :class:`BoxStyle`. Following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.stale = True
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
Old attrs simply are forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
The following boxstyles are available:
%(AvailableBoxstyles)s
ACCEPTS: %(ListBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
self.stale = True
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
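# Illustrative sketch (never executed on import): a FancyBboxPatch wrapping a
# small rectangle with a rounded box style.  The geometry, padding and colors
# are demonstration assumptions.
def _example_fancybboxpatch_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    box = FancyBboxPatch((0.3, 0.4), width=0.4, height=0.2,
                         boxstyle="round,pad=0.05",
                         facecolor='lightyellow', edgecolor='olive')
    ax.add_patch(box)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig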
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
    A connectionstyle object can be created either as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
    An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
    patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
Clip the path to the boundary of the patchA and patchB.
            The starting point of the path needs to be inside patchA
            and the end point inside patchB. The *contains* method of
            each patch object is used to test whether the point is
            inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by fixed size (in points) with shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is clipped and shrunken.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrunk_path
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance of C1 from the line
        connecting C0 and C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The middle control point is placed at the
        intersection of two lines which cross the start and end
        points and have angles of angleA and angleB, respectively.
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
        Creates a piecewise continuous quadratic Bezier path between
        two points. The path has one passing-through point placed at
        the intersection of two lines which cross the start and end
        points and have angles of angleA and angleB, respectively. The
        connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
        Creates a piecewise continuous quadratic Bezier path between
two points. The path can have two passing-through points, a
point placed at the distance of armA and angle of angleA from
point A, another point with respect to point B. The edges are
rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
                # x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
A line with *angle* between A and B with *armA* and
*armB*. One of the arms is extended so that they are connected in
a right angle. The length of armA is determined by (*armA*
+ *fraction* x AB distance). Same for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
*armA* : minimum length of armA
*armB* : minimum length of armB
*fraction* : a fraction of the distance between two points that
will be added to armA and armB.
*angle* : angle of the connecting line (if None, parallel to A
and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
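# Quick numeric check of the helper above (illustrative, uncalled): walking a
# distance of 5 from (0, 0) toward (3, 4) lands on (3, 4) itself, since that
# segment has length 5.
def _example_point_along_a_line():
    return _point_along_a_line(0.0, 0.0, 3.0, 4.0, 5.0)  # -> (3.0, 4.0)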
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
    arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
    An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
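    # Illustrative usage sketch (kept as a comment so it does not become a
    # class attribute): arrow styles are normally consumed through
    # FancyArrowPatch, e.g.::
    #
    #     FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
    #                     arrowstyle="-|>", mutation_scale=20)
    #
    # or resolved from a string via ``ArrowStyle("->", head_length=.4,
    # head_width=.2)``, as described in the class docstring above.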
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
        value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
if ((len(segments) != 2) or (segments[0][1] != Path.MOVETO) or
(segments[1][1] != Path.CURVE3)):
msg = "'path' it's not a valid quadratic bezier curve"
raise ValueError(msg)
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
            class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head etc.
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
            boolean. The boolean value indicates whether the path can
            be filled or not. The return value can also be a list of paths
            and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
            and takes care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrunk = Path(vertices, codes)
# call transmute method with squeezed height.
                path_mutated, fillable = self.transmute(path_shrunk,
                                                        mutation_size,
                                                        linewidth)
if cbook.iterable(fillable):
path_list = []
                    for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
        returned path is simply a concatenation of the original path and at
        most two paths representing the arrow heads at the begin point and
        at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determines the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
            drawn with capstyle=projecting, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunken so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
            # pad_projected : amount of pad to account for the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = (self.beginarrow and
not ((x0 == x1) and (y0 == y1)))
if has_begin_arrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = (self.endarrow and not ((x2 == x3) and (y2 == y3)))
if has_end_arrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
# this simple code will not work if ddx, ddy is greater than
            # separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if has_begin_arrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if has_end_arrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
An arrow with a bracket(]) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
An arrow with a bracket(]) at its end.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA,
lengthA=lengthA,
angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB,
lengthB=lengthB,
angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of length
                # head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of length
                # head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
            # path for tail
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
            # point where the tail starts (near the begin point)
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
        Wedge shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:ArrowStyle.
"""
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
        If *posA* and *posB* are given, a path connecting the two points is
        created according to the *connectionstyle*. The path will be
        clipped with *patchA* and *patchB* and further shrunk by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path* is
        provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
        drawn. It can be a string of the available arrowstyle names,
        with optional comma-separated attributes, or an ArrowStyle
        instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connetors = None
else:
            raise ValueError("either posA and posB, or path need to be provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
"""
self._dpi_cor = dpi_cor
self.stale = True
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
"""
return self._dpi_cor
    def set_positions(self, posA, posB):
        """ Set the begin and end positions of the connecting
        path. Use the current value if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
self.stale = True
    def set_patchB(self, patchB):
        """ set the end patch.
"""
self.patchB = patchB
self.stale = True
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
self.stale = True
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
self.stale = True
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
return the path of the arrow in the data coordinate. Use
get_path_in_displaycoord() method to retrieve the arrow path
in the display coord.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
        # FIXME : dpi_cor is for the dpi-dependency of the
# linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
self.stale = False
class ConnectionPatch(FancyArrowPatch):
"""
A :class:`~matplotlib.patches.ConnectionPatch` class is to make
connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
        patchA          default is None
        patchB          default is None
        shrinkA         default is 0 points
        shrinkB         default is 0 points
        mutation_scale  default is 10 points
        mutation_aspect default is None
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper, right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,1 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, x, y, s, axes=None):
"""
        Calculate the pixel position of the given point.
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
set *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside the
axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
        Check whether the annotation needs to
        be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
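# A minimal usage sketch (not part of the original module): the arrow and
# connection styles registered above are normally consumed through their
# string names. Coordinates, colors and style values below are illustrative
# assumptions only.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from matplotlib.patches import FancyArrowPatch, ConnectionPatch

    fig, (ax1, ax2) = plt.subplots(1, 2)
    # curved arrow with a filled head, in the data coordinates of ax1
    arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
                            arrowstyle="-|>", mutation_scale=20,
                            connectionstyle="arc3,rad=0.3", color="blue")
    ax1.add_patch(arrow)
    # arrow connecting a data point in ax1 to a data point in ax2
    con = ConnectionPatch(xyA=(0.8, 0.8), xyB=(0.2, 0.2),
                          coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, arrowstyle="->")
    ax2.add_artist(con)
    plt.show()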
|
mit
|
istellartech/OpenTsiolkovsky
|
tools/outlier_detector_2d.py
|
1
|
2753
|
# coding: utf-8
from __future__ import print_function
import numpy as np
import scipy.linalg
class OutlierDetector2D:
def __init__(self, n_split=20, contamination=0.0027):
self.n_split = n_split
self.contamination = contamination
def fit_predict(self, xy):
Cov = np.cov(xy, rowvar=0) # cal. covariance matrix
A = scipy.linalg.sqrtm(Cov) # sqrt of Cov matrix
XY = np.linalg.solve(A, xy.T).T # cal. A^-1 * xy
XY -= np.median(XY, axis=0) # sub. median
Theta = np.arctan2(XY[:, 1], XY[:, 0])
Index_original2theta = np.argsort(Theta)
Index_theta2original = np.argsort(Index_original2theta)
R2 = np.sum(XY**2, axis=1) # cal. radius**2
R2 = R2[Index_original2theta] # sorting by Theta
N = len(R2)
dN = float(N) / float(self.n_split)
for i in range(self.n_split): # in each region
Nrange_i = range(int(np.ceil(dN * i)), min(int(np.ceil(dN * (i + 1))), N))
sigma2_i = np.mean(R2[Nrange_i]) * 0.5 # cal. estimated sigma**2
if sigma2_i > 0:
R2[Nrange_i] /= sigma2_i # normalize radius
Index_theta2rlarge = np.argsort(-R2)
Index_rlarge2theta = np.argsort(Index_theta2rlarge)
Is_contami_theta_order = (Index_rlarge2theta + 1 <= N * self.contamination)
return np.where(Is_contami_theta_order[Index_theta2original], -1, +1)
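# Illustrative programmatic usage (a sketch, not part of the original script):
# run the detector on synthetic correlated Gaussian data. The distribution,
# seed and parameter values below are assumptions chosen for demonstration.
def _demo_synthetic(n=2000, seed=0):
    rng = np.random.RandomState(seed)
    xy = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.8], [0.8, 1.0]], size=n)
    pred = OutlierDetector2D(n_split=10, contamination=0.01).fit_predict(xy)
    print("flagged %d of %d points as outliers" % ((pred == -1).sum(), n))
    return pred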
if __name__ == "__main__":
import argparse
import pandas as pd
import os
parser = argparse.ArgumentParser(description="Outlier Detector for 2D Scattering Data")
parser.add_argument("filename.csv", type=str)
parser.add_argument("-col", "--columns",
nargs=2, type=int, default=[0, 1], metavar=("COLUMN1", "COLUMN2"),
help="columns to process (default=[0, 1])")
parser.add_argument("-cont", "--contamination",
type=float, default=0.0027,
help="ratio of contamination of the data set (default=0.0027)")
parser.add_argument("-n", "--n_split",
type=int, default=20,
                        help="number of regions to split the data set (default=20)")
args = parser.parse_args()
filename = vars(args)["filename.csv"]
df = pd.read_csv(filename, index_col=False)
xy = df.values[:, args.columns]
clf = OutlierDetector2D(n_split=args.n_split, contamination=args.contamination)
pred = clf.fit_predict(xy)
root, ext = os.path.splitext(filename)
df_inlier = df[pred == 1]
df_inlier.to_csv(root + "_inlier" + ext, index=False)
df_outlier = df[pred == -1]
df_outlier.to_csv(root + "_outlier" + ext, index=False)
|
mit
|
gfyoung/pandas
|
pandas/tests/indexes/multi/test_integrity.py
|
1
|
8520
|
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas._testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = list(range(70000))
minor_axis = list(range(10))
major_codes = np.arange(70000)
minor_codes = np.repeat(range(10), 7000)
    # the fact that this works means it's consistent
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
# inconsistent
major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert index.is_unique is False
@pytest.mark.arm_slow
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product(
[np.arange(1000), np.arange(1000)], names=["one", "two"]
)
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def test_take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_below_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_below_1000000.loc[(3, 0), "dest"]
df_above_1000000 = pd.DataFrame(
1, index=MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_above_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_above_1000000.loc[(3, 0), "dest"]
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame(
{"a": r, "b": r}, index=MultiIndex.from_tuples([(x, x) for x in r])
)
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
df["a"].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, codes = idx.levels, idx.codes
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile("does not support mutable operations")
with pytest.raises(TypeError, match=mutable_regex):
levels[0] = levels[0]
with pytest.raises(TypeError, match=mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with pytest.raises(TypeError, match=mutable_regex):
codes[0] = codes[0]
with pytest.raises(ValueError, match="assignment destination is read-only"):
codes[0][0] = codes[0][0]
# and for names
names = idx.names
with pytest.raises(TypeError, match=mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert ind.is_monotonic
with tm.assert_produces_warning(FutureWarning):
ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1)
df.index.names = ["fizz", "buzz"]
str(df)
expected = pd.DataFrame(
{"bar": np.arange(100), "foo": np.arange(100)},
index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]),
)
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values("fizz")
expected = pd.Int64Index(np.arange(10), name="fizz").repeat(10)
tm.assert_index_equal(result, expected)
result = df.index.get_level_values("buzz")
expected = pd.Int64Index(np.tile(np.arange(10), 10), name="buzz")
tm.assert_index_equal(result, expected)
def test_memory_usage(idx):
result = idx.memory_usage()
if len(idx):
idx.get_loc(idx[0])
result2 = idx.memory_usage()
result3 = idx.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(idx, (RangeIndex, IntervalIndex)):
assert result2 > result
if idx.inferred_type == "object":
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_nlevels(idx):
assert idx.nlevels == 2
|
bsd-3-clause
|
yyjiang/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
306
|
3329
|
"""
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t distributions with a low number of degrees of
freedom (top left figure). We mix them to create observations (top right
figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
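# Optional numeric check (a sketch, not part of the original example): compare
# the ICA-estimated mixing matrix with the true mixing matrix A. ICA recovers
# the columns only up to permutation, sign and scale, so both matrices are
# column-normalized before printing.
def _normalized_columns(M):
    M = np.asarray(M, dtype=float)
    return M / np.abs(M).max(axis=0)

print("True mixing matrix (columns normalized):")
print(_normalized_columns(A))
print("ICA-estimated mixing matrix (columns normalized):")
print(_normalized_columns(ica.mixing_))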
|
bsd-3-clause
|
BigTone2009/sms-tools
|
software/models_interface/spsModel_function.py
|
21
|
3527
|
# function to call the main analysis/synthesis functions in software/models/spsModel.py
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import spsModel as SPS
import utilFunctions as UF
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001, stocf=0.2):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
stocf: decimation factor used for the stochastic approximation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
	# perform sinusoidal+stochastic analysis
tfreq, tmag, tphase, stocEnv = SPS.spsModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope, stocf)
# synthesize sinusoidal+stochastic model
y, ys, yst = SPS.spsModelSynth(tfreq, tmag, tphase, stocEnv, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_sines.wav'
outputFileStochastic = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_stochastic.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel.wav'
	# write sound files for the sinusoidal and stochastic components, and their sum
UF.wavwrite(ys, fs, outputFileSines)
UF.wavwrite(yst, fs, outputFileStochastic)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 10000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
plt.subplot(3,1,2)
numFrames = int(stocEnv[:,0].size)
sizeEnv = int(stocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
	plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
plt.autoscale(tight=True)
# plot sinusoidal frequencies on top of stochastic component
if (tfreq.shape[1] > 0):
sines = tfreq*np.less(tfreq,maxplotfreq)
sines[sines==0] = np.nan
numFrames = int(sines[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, sines, color='k', ms=3, alpha=1)
plt.xlabel('time(s)')
plt.ylabel('Frequency(Hz)')
plt.autoscale(tight=True)
plt.title('sinusoidal + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
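# A minimal sketch (not part of the original script): the same analysis can
# also be driven programmatically with non-default settings; the parameter
# values below are illustrative assumptions, not recommended defaults.
def main_coarse_stochastic(inputFile='../../sounds/bendir.wav'):
    main(inputFile=inputFile, window='blackman', M=4001, N=4096,
         t=-90, minSineDur=0.05, maxnSines=100, stocf=0.1)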
if __name__ == "__main__":
main()
|
agpl-3.0
|
virneo/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py
|
72
|
6429
|
"""
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
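if __name__ == "__main__":
    # A small self-check sketch (not part of the original module): parse an
    # arbitrary example pattern and print the resulting property dictionary.
    # Expected keys: family=['serif'], size=['12'], weight=['bold'],
    # slant=['italic'].
    print(parse_fontconfig_pattern('serif-12:bold:italic'))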
|
agpl-3.0
|
jupyter-widgets/ipywidgets
|
ipywidgets/widgets/interaction.py
|
1
|
20275
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interact with functions using widgets."""
from collections.abc import Iterable, Mapping
from inspect import signature, Parameter
from inspect import getcallargs
from inspect import getfullargspec as check_argspec
import sys
from IPython import get_ipython
from . import (Widget, ValueWidget, Text,
FloatSlider, IntSlider, Checkbox, Dropdown,
VBox, Button, DOMWidget, Output)
from IPython.display import display, clear_output
from traitlets import HasTraits, Any, Unicode, observe
from numbers import Real, Integral
from warnings import warn
empty = Parameter.empty
def show_inline_matplotlib_plots():
"""Show matplotlib plots immediately if using the inline backend.
With ipywidgets 6.0, matplotlib plots don't work well with interact when
using the inline backend that comes with ipykernel. Basically, the inline
backend only shows the plot after the entire cell executes, which does not
play well with drawing plots inside of an interact function. See
https://github.com/jupyter-widgets/ipywidgets/issues/1181/ and
https://github.com/ipython/ipython/issues/10376 for more details. This
function displays any matplotlib plots if the backend is the inline backend.
"""
if 'matplotlib' not in sys.modules:
# matplotlib hasn't been imported, nothing to do.
return
try:
import matplotlib as mpl
from ipykernel.pylab.backend_inline import flush_figures
except ImportError:
return
if mpl.get_backend() == 'module://ipykernel.pylab.backend_inline':
flush_figures()
def interactive_output(f, controls):
"""Connect widget controls to a function.
This function does not generate a user interface for the widgets (unlike `interact`).
This enables customisation of the widget user interface layout.
The user interface layout must be defined and displayed manually.
"""
out = Output()
def observer(change):
kwargs = {k:v.value for k,v in controls.items()}
show_inline_matplotlib_plots()
with out:
clear_output(wait=True)
f(**kwargs)
show_inline_matplotlib_plots()
for k,w in controls.items():
w.observe(observer, 'value')
show_inline_matplotlib_plots()
observer(None)
return out
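# A minimal usage sketch (not part of the original module, meant for a
# notebook): build two sliders, lay them out manually, and bind them to a
# function with interactive_output. Widget and variable names are
# illustrative assumptions.
def _example_interactive_output():  # pragma: no cover - illustrative only
    a = IntSlider(description='a')
    b = IntSlider(description='b')
    out = interactive_output(lambda a, b: print(a + b), {'a': a, 'b': b})
    display(VBox([a, b]), out)
    return a, b, out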
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({}, {}, {})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={}, value={}, max={})'.format(min, value, max))
return min, max, value
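# A small sketch of the inference rules above (not part of the original
# module); the three asserts cover only the most common cases.
def _example_min_max_inference():  # pragma: no cover - illustrative only
    assert _get_min_max_value(None, None, 5) == (-5, 15, 5)       # range inferred from value
    assert _get_min_max_value(0, 10, None) == (0, 10, 5)          # value inferred from range
    assert _get_min_max_value(0, 10, None, step=3) == (0, 10, 3)  # value snapped onto the step grid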
def _yield_abbreviations_for_parameter(param, kwargs):
"""Get an abbreviation for a function parameter."""
name = param.name
kind = param.kind
default = param.default
not_found = (name, empty, empty)
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
if name in kwargs:
value = kwargs.pop(name)
elif default is not empty:
value = default
else:
yield not_found
yield (name, value, default)
elif kind == Parameter.VAR_KEYWORD:
# In this case name=kwargs and we yield the items in kwargs with their keys.
for k, v in kwargs.copy().items():
kwargs.pop(k)
yield k, v, empty
class interactive(VBox):
"""
A VBox container containing a group of interactive widgets tied to a
function.
Parameters
----------
__interact_f : function
The function to which the interactive widgets are tied. The `**kwargs`
should match the function signature.
__options : dict
A dict of options. Currently, the only supported keys are
``"manual"`` (defaults to ``False``), ``"manual_name"`` (defaults
to ``"Run Interact"``) and ``"auto_display"`` (defaults to ``False``).
**kwargs : various, optional
An interactive widget is created for each keyword argument that is a
valid widget abbreviation.
Note that the first two parameters intentionally start with a double
underscore to avoid being mixed up with keyword arguments passed by
``**kwargs``.
"""
def __init__(self, __interact_f, __options={}, **kwargs):
VBox.__init__(self, _dom_classes=['widget-interact'])
self.result = None
self.args = []
self.kwargs = {}
self.f = f = __interact_f
self.clear_output = kwargs.pop('clear_output', True)
self.manual = __options.get("manual", False)
self.manual_name = __options.get("manual_name", "Run Interact")
self.auto_display = __options.get("auto_display", False)
new_kwargs = self.find_abbreviations(kwargs)
# Before we proceed, let's make sure that the user has passed a set of args+kwargs
# that will lead to a valid call of the function. This protects against unspecified
# and doubly-specified arguments.
try:
check_argspec(f)
except TypeError:
# if we can't inspect, we can't validate
pass
else:
getcallargs(f, **{n:v for n,v,_ in new_kwargs})
# Now build the widgets from the abbreviations.
self.kwargs_widgets = self.widgets_from_abbreviations(new_kwargs)
# This has to be done as an assignment, not using self.children.append,
# so that traitlets notices the update. We skip any objects (such as fixed) that
# are not DOMWidgets.
c = [w for w in self.kwargs_widgets if isinstance(w, DOMWidget)]
# If we are only to run the function on demand, add a button to request this.
if self.manual:
self.manual_button = Button(description=self.manual_name)
c.append(self.manual_button)
self.out = Output()
c.append(self.out)
self.children = c
# Wire up the widgets
# If we are doing manual running, the callback is only triggered by the button
# Otherwise, it is triggered for every trait change received
# On-demand running also suppresses running the function with the initial parameters
if self.manual:
self.manual_button.on_click(self.update)
# Also register input handlers on text areas, so the user can hit return to
# invoke execution.
for w in self.kwargs_widgets:
if isinstance(w, Text):
w.on_submit(self.update)
else:
for widget in self.kwargs_widgets:
widget.observe(self.update, names='value')
self.update()
# Callback function
def update(self, *args):
"""
Call the interact function and update the output widget with
the result of the function call.
Parameters
----------
*args : ignored
Required for this method to be used as traitlets callback.
"""
self.kwargs = {}
if self.manual:
self.manual_button.disabled = True
try:
show_inline_matplotlib_plots()
with self.out:
if self.clear_output:
clear_output(wait=True)
for widget in self.kwargs_widgets:
value = widget.get_interact_value()
self.kwargs[widget._kwarg] = value
self.result = self.f(**self.kwargs)
show_inline_matplotlib_plots()
if self.auto_display and self.result is not None:
display(self.result)
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in interact callback: %s", e, exc_info=True)
else:
ip.showtraceback()
finally:
if self.manual:
self.manual_button.disabled = False
# Find abbreviations
def signature(self):
return signature(self.f)
def find_abbreviations(self, kwargs):
"""Find the abbreviations for the given function and kwargs.
Return (name, abbrev, default) tuples.
"""
new_kwargs = []
try:
sig = self.signature()
except (ValueError, TypeError):
# can't inspect, no info from function; only use kwargs
return [ (key, value, value) for key, value in kwargs.items() ]
for param in sig.parameters.values():
for name, value, default in _yield_abbreviations_for_parameter(param, kwargs):
if value is empty:
raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
new_kwargs.append((name, value, default))
return new_kwargs
# Abbreviations to widgets
def widgets_from_abbreviations(self, seq):
"""Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
result = []
for name, abbrev, default in seq:
if isinstance(abbrev, Widget) and (not isinstance(abbrev, ValueWidget)):
raise TypeError("{!r} is not a ValueWidget".format(abbrev))
widget = self.widget_from_abbrev(abbrev, default)
if widget is None:
raise ValueError("{!r} cannot be transformed to a widget".format(abbrev))
if not hasattr(widget, "description") or not widget.description:
widget.description = name
widget._kwarg = name
result.append(widget)
return result
@classmethod
def widget_from_abbrev(cls, abbrev, default=empty):
"""Build a ValueWidget instance given an abbreviation or Widget."""
if isinstance(abbrev, ValueWidget) or isinstance(abbrev, fixed):
return abbrev
if isinstance(abbrev, tuple):
widget = cls.widget_from_tuple(abbrev)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# Try single value
widget = cls.widget_from_single_value(abbrev)
if widget is not None:
return widget
# Something iterable (list, dict, generator, ...). Note that str and
        # tuple are handled above, which is why we check this case last.
if isinstance(abbrev, Iterable):
widget = cls.widget_from_iterable(abbrev)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# No idea...
return None
@staticmethod
def widget_from_single_value(o):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, str):
return Text(value=str(o))
elif isinstance(o, bool):
return Checkbox(value=o)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, min=min, max=max)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, min=min, max=max)
else:
return None
@staticmethod
def widget_from_tuple(o):
"""Make widgets from a tuple abbreviation."""
if _matches(o, (Real, Real)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, min=min, max=max)
elif _matches(o, (Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, min=min, max=max, step=step)
@staticmethod
def widget_from_iterable(o):
"""Make widgets from an iterable. This should not be done for
a string or tuple."""
# Dropdown expects a dict or list, so we convert an arbitrary
# iterable to either of those.
if isinstance(o, (list, dict)):
return Dropdown(options=o)
elif isinstance(o, Mapping):
return Dropdown(options=list(o.items()))
else:
return Dropdown(options=list(o))
# Return a factory for interactive functions
@classmethod
def factory(cls):
options = dict(manual=False, auto_display=True, manual_name="Run Interact")
return _InteractFactory(cls, options)
class _InteractFactory:
"""
Factory for instances of :class:`interactive`.
This class is needed to support options like::
>>> @interact.options(manual=True)
... def greeting(text="World"):
... print("Hello {}".format(text))
Parameters
----------
cls : class
The subclass of :class:`interactive` to construct.
options : dict
A dict of options used to construct the interactive
function. By default, this is returned by
``cls.default_options()``.
kwargs : dict
A dict of **kwargs to use for widgets.
"""
def __init__(self, cls, options, kwargs={}):
self.cls = cls
self.opts = options
self.kwargs = kwargs
def widget(self, f):
"""
Return an interactive function widget for the given function.
The widget is only constructed, not displayed nor attached to
the function.
Returns
-------
An instance of ``self.cls`` (typically :class:`interactive`).
Parameters
----------
f : function
The function to which the interactive widgets are tied.
"""
return self.cls(f, self.opts, **self.kwargs)
def __call__(self, __interact_f=None, **kwargs):
"""
Make the given function interactive by adding and displaying
the corresponding :class:`interactive` widget.
Expects the first argument to be a function. Parameters to this
function are widget abbreviations passed in as keyword arguments
(``**kwargs``). Can be used as a decorator (see examples).
Returns
-------
f : __interact_f with interactive widget attached to it.
Parameters
----------
__interact_f : function
The function to which the interactive widgets are tied. The `**kwargs`
should match the function signature. Passed to :func:`interactive()`
**kwargs : various, optional
An interactive widget is created for each keyword argument that is a
valid widget abbreviation. Passed to :func:`interactive()`
Examples
--------
Render an interactive text field that shows the greeting with the passed in
text::
# 1. Using interact as a function
def greeting(text="World"):
print("Hello {}".format(text))
interact(greeting, text="Jupyter Widgets")
# 2. Using interact as a decorator
@interact
def greeting(text="World"):
print("Hello {}".format(text))
# 3. Using interact as a decorator with named parameters
@interact(text="Jupyter Widgets")
def greeting(text="World"):
print("Hello {}".format(text))
Render an interactive slider widget and prints square of number::
# 1. Using interact as a function
def square(num=1):
print("{} squared is {}".format(num, num*num))
interact(square, num=5)
# 2. Using interact as a decorator
@interact
def square(num=2):
print("{} squared is {}".format(num, num*num))
# 3. Using interact as a decorator with named parameters
@interact(num=5)
def square(num=2):
print("{} squared is {}".format(num, num*num))
"""
# If kwargs are given, replace self by a new
# _InteractFactory with the updated kwargs
if kwargs:
kw = dict(self.kwargs)
kw.update(kwargs)
self = type(self)(self.cls, self.opts, kw)
f = __interact_f
if f is None:
# This branch handles the case 3
# @interact(a=30, b=40)
# def f(*args, **kwargs):
# ...
#
# Simply return the new factory
return self
# positional arg support in: https://gist.github.com/8851331
# Handle the cases 1 and 2
# 1. interact(f, **kwargs)
# 2. @interact
# def f(*args, **kwargs):
# ...
w = self.widget(f)
try:
f.widget = w
except AttributeError:
# some things (instancemethods) can't have attributes attached,
# so wrap in a lambda
f = lambda *args, **kwargs: __interact_f(*args, **kwargs)
f.widget = w
show_inline_matplotlib_plots()
display(w)
return f
def options(self, **kwds):
"""
Change options for interactive functions.
Returns
-------
A new :class:`_InteractFactory` which will apply the
options when called.
"""
opts = dict(self.opts)
for k in kwds:
try:
# Ensure that the key exists because we want to change
# existing options, not add new ones.
_ = opts[k]
except KeyError:
raise ValueError("invalid option {!r}".format(k))
opts[k] = kwds[k]
return type(self)(self.cls, opts, self.kwargs)
interact = interactive.factory()
interact_manual = interact.options(manual=True, manual_name="Run Interact")
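# A minimal usage sketch of interact_manual (the function and argument names
# below are placeholders, not part of this module):
#
#     @interact_manual(n=(1, 10))
#     def slow_report(n=3):
#         ...  # heavy work runs only when the "Run Interact" button is clicked
#
# interact_manual is simply interact with manual=True preselected, so widgets
# are built immediately but the wrapped function waits for the button press.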
class fixed(HasTraits):
"""A pseudo-widget whose value is fixed and never synced to the client."""
value = Any(help="Any Python object")
    description = Unicode('', help="A description of the fixed value")
def __init__(self, value, **kwargs):
super().__init__(value=value, **kwargs)
def get_interact_value(self):
"""Return the value for this widget which should be passed to
interactive functions. Custom widgets can change this method
to process the raw value ``self.value``.
"""
return self.value
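# Usage sketch for fixed (the function and data names are assumptions): pin an
# argument so it is passed through unchanged and gets no widget of its own.
#
#     interact(plot_column, data=fixed(df), column=['a', 'b', 'c'])
#
# Only ``column`` is rendered as a widget; ``data`` keeps the value given here.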
|
bsd-3-clause
|
hijinks/python-bcet
|
random_points.py
|
1
|
1962
|
#!/usr/bin/env python
# Use pixel difference and Kirsch filter to pick a series of random points
import georasters as gr
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
import random
import string
import csv
diff_gr = gr.from_file('./output/diff.tif')
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info('./output/diff.tif') # Raster information
edge_gr = gr.from_file('./output/k100.tif')
raster_bounds = diff_gr.bounds
lat_range = np.linspace(raster_bounds[0]+10, raster_bounds[2]-10, num=xsize, endpoint=False, retstep=False, dtype=float)
lon_range = np.linspace(raster_bounds[1]+10, raster_bounds[3]-10, num=ysize, endpoint=False, retstep=False, dtype=float)
npz = np.zeros(diff_gr.raster.shape)
npz[np.where(edge_gr.raster < 1)] = 1
npz[np.where(diff_gr.raster > 20)] = 0
npd = ndimage.binary_erosion(npz, iterations=1)
npd = npd+1
npd[np.where(diff_gr.raster < 1)] = 0
npd_gr = gr.GeoRaster(npd,
diff_gr.geot,
nodata_value=ndv,
projection=diff_gr.projection,
datatype=diff_gr.datatype)
npd_gr.to_tiff('./npd')
lon_random = np.random.choice(ysize, 20000)
lat_random = np.random.choice(xsize, 20000)
random_coords = np.vstack((lat_random,lon_random)).transpose()
# Deduplicate the randomly drawn pixel coordinates before sampling from them.
random_coords_unique = np.vstack({tuple(row) for row in random_coords})
def valid_point(v):
    return v > 1
i = 0
p = 0
with open('random_points3.csv', 'wb') as csvfile:
csvw = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvw.writerow(['Latitude', 'Longitude', 'Name'])
while p < 1000:
coord_r = random_coords_unique[i]
coord_lat = lat_range[coord_r[0]]
coord_lon = lon_range[coord_r[1]]
print([coord_lat,coord_lon])
if valid_point(npd_gr.map_pixel(coord_lat,coord_lon)):
label = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
csvw.writerow([coord_lat, coord_lon, label])
p = p+1
i = i+1
|
mit
|
h2educ/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
68
|
23597
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
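                # Concretely: minimizing sum_i w_i * (y_i - x_i . beta)**2 + alpha * ||beta||**2
                # is identical to plain ridge on X' = sqrt(w)[:, None] * X and
                # y' = sqrt(w) * y, which is exactly what coefs2 checks below.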
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
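    # For ridge there is a closed form for the leave-one-out residuals:
    #     e_i = (y_i - y_hat_i) / (1 - H_ii),  with  H = X (X^T X + alpha I)^-1 X^T,
    # so every LOO error comes from a single fit; the brute-force loop further
    # down recomputes them the slow way as a cross-check.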
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
|
bsd-3-clause
|
r-mart/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
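# Illustrative sketch (not part of this module's public API): draw one sparse
# column of class labels {0, 1, 2} with skewed probabilities. The sizes and
# probabilities below are arbitrary assumptions for demonstration.
def _demo_random_choice_csc():
    classes = [np.array([0, 1, 2])]
    class_probability = [np.array([0.5, 0.25, 0.25])]
    mat = random_choice_csc(10, classes, class_probability, random_state=0)
    # csc matrix of shape (10, 1); zeros are stored implicitly.
    return mat.toarray().ravel()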
|
bsd-3-clause
|
artmusic0/theano-learning.part02
|
web_with_predict_v1.0/cnn.py
|
3
|
8977
|
import os
import sys, getopt
import time
import numpy
import theano
import cPickle
import theano.tensor as T
from sklearn import preprocessing
from logistic_sgd import LogisticRegression
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
def ReLU(x):
y = T.maximum(0.0, x)
return (y)
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
        Hidden unit activation is given by: tanh(dot(input, W) + b)
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.inp = input
# end-snippet-1
        # `W` is initialized with `W_values` which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
        # for the tanh activation function.
        # The output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU.
        # Note : optimal initialization of weights is dependent on the
        # activation function used (among other things).
        # For example, results presented in [Xavier10] suggest that you
        # should use 4 times larger initial weights for sigmoid
        # compared to tanh.
        # We have no info for other functions, so we use the same as
        # tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
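# Illustrative sketch (not used by the rest of this module): wiring a single
# HiddenLayer to a symbolic input and compiling it. The layer sizes here are
# arbitrary assumptions.
def _demo_hidden_layer():
    rng = numpy.random.RandomState(0)
    x = T.matrix('x')  # (n_examples, n_in) symbolic input
    layer = HiddenLayer(rng, input=x, n_in=100, n_out=50, activation=T.tanh)
    f = theano.function([x], layer.output)
    out = f(numpy.ones((3, 100), dtype=theano.config.floatX))
    return out.shape  # (3, 50)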
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
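# Illustrative helper (not called anywhere): the spatial output size of a
# LeNetConvPoolLayer with 'valid' convolution and non-overlapping pooling is
# ((H - filter_h + 1) // pool_h, (W - filter_w + 1) // pool_w). The defaults
# below mirror the first layer of the CNN defined further down.
def _conv_pool_output_shape(image_hw=(512, 288), filter_hw=(5, 5), pool_hw=(2, 2)):
    h = (image_hw[0] - filter_hw[0] + 1) // pool_hw[0]
    w = (image_hw[1] - filter_hw[1] + 1) // pool_hw[1]
    return h, w  # (254, 142) for the defaults above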
class CNN(object):
def __getstate__(self):
weights = [p.get_value() for p in self.params]
#return (self.layer0.W, self.layer0.b, self.layer1.W, self.layer1.b, self.layer2.W,
# self.layer2.b, self.layer3.W, self.layer3.b)
return weights
def __setstate__(self, weights):
# (self.layer0.W, self.layer0.b, self.layer1.W, self.layer1.b, self.layer2.W, self.layer2.b, self.layer3.W, self.layer3.b) = state
i = iter(weights)
for p in self.params:
p.set_value(i.next())
def __init__(self, rng, input, nkerns, batch_size):
        # Reshape matrix of rasterized images of shape (batch_size, 512 * 288)
        # to a 4D tensor, compatible with our LeNetConvPoolLayer
        # (512, 288) is the size of the input images.
self.layer0_input = input.reshape((batch_size, 1, 512, 288))
        # Construct the first convolutional pooling layer:
        # filtering reduces the image size to (512-5+1, 288-5+1) = (508, 284)
        # maxpooling reduces this further to (508/2, 284/2) = (254, 142)
        # 4D output tensor is thus of shape (batch_size, nkerns[0], 254, 142)
self.layer0 = LeNetConvPoolLayer(
rng,
input=self.layer0_input,
image_shape=(batch_size, 1, 512, 288),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
        # Construct the second convolutional pooling layer
        # filtering reduces the image size to (254-5+1, 142-5+1) = (250, 138)
        # maxpooling reduces this further to (250/2, 138/2) = (125, 69)
        # 4D output tensor is thus of shape (batch_size, nkerns[1], 125, 69)
self.layer1 = LeNetConvPoolLayer(
rng,
input=self.layer0.output,
image_shape=(batch_size, nkerns[0], 254, 142),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
        # This will generate a matrix of shape (batch_size, nkerns[1] * 125 * 69),
        # matching the n_in of the fully-connected layer below.
self.layer2_input = self.layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
self.layer2 = HiddenLayer(
rng,
input=self.layer2_input,
n_in=nkerns[1] * 125 * 69,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
self.layer3 = LogisticRegression(input=self.layer2.output, n_in=500, n_out=20)
# the cost we minimize during training is the NLL of the model
# self.cost = self.layer3.negative_log_likelihood(y)
self.errors = self.layer3.errors
# create a list of all model parameters to be fit by gradient descent
self.params = self.layer3.params + self.layer2.params + self.layer1.params + self.layer0.params
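# Illustrative sketch of instantiating this CNN for prediction; the batch size,
# kernel counts and the assumption that logistic_sgd.LogisticRegression exposes
# y_pred (as in the deeplearning.net tutorial) are not guaranteed by this file.
def _demo_build_cnn(batch_size=1, nkerns=(20, 50)):
    rng = numpy.random.RandomState(23455)
    x = T.matrix('x')  # each row is a rasterized 512x288 image
    classifier = CNN(rng=rng, input=x, nkerns=nkerns, batch_size=batch_size)
    predict = theano.function([x], classifier.layer3.y_pred)
    return classifier, predict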
|
gpl-3.0
|
BiaDarkia/scikit-learn
|
sklearn/linear_model/tests/test_huber.py
|
10
|
7676
|
# Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_max_iter():
X, y = make_regression_with_outliers()
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
# Test that outliers filtering is scaling independent.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
# Test they should converge to same coefficients for same parameters
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, max_iter=10000,
fit_intercept=False, epsilon=1.35, tol=None)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
    # Test that HuberRegressor gives a better R^2 score than Ridge on non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
|
bsd-3-clause
|
florian-f/sklearn
|
sklearn/metrics/scorer.py
|
4
|
5141
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A Scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Licence: Simplified BSD
import numpy as np
from . import (r2_score, mean_squared_error, accuracy_score, f1_score,
auc_score, average_precision_score, precision_score,
recall_score)
from .cluster import adjusted_rand_score
class Scorer(object):
"""Flexible scores for any estimator.
This class wraps estimator scoring functions for the use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and provides a call method.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good.
needs_threshold : bool, default=False
Whether score_func takes a continuous decision certainty.
For example ``average_precision`` or the area under the roc curve
can not be computed using predictions alone, but need the output of
``decision_function`` or ``predict_proba``.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Examples
--------
>>> from sklearn.metrics import fbeta_score, Scorer
>>> ftwo_scorer = Scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
def __init__(self, score_func, greater_is_better=True,
needs_threshold=False, **kwargs):
self.score_func = score_func
self.greater_is_better = greater_is_better
self.needs_threshold = needs_threshold
self.kwargs = kwargs
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self.kwargs.items()])
return ("Scorer(score_func=%s, greater_is_better=%s, needs_thresholds="
"%s%s)" % (self.score_func.__name__, self.greater_is_better,
self.needs_threshold, kwargs_string))
def __call__(self, estimator, X, y):
"""Score X and y using the provided estimator.
Parameters
----------
estimator : object
Trained estimator to use for scoring.
If ``needs_threshold`` is True, estimator needs
to provide ``decision_function`` or ``predict_proba``.
Otherwise, estimator needs to provide ``predict``.
X : array-like or sparse matrix
Test data that will be scored by the estimator.
y : array-like
            Ground truth (true target values) for X.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
if self.needs_threshold:
if len(np.unique(y)) > 2:
raise ValueError("This classification score only "
"supports binary classification.")
try:
y_pred = estimator.decision_function(X).ravel()
except (NotImplementedError, AttributeError):
y_pred = estimator.predict_proba(X)[:, 1]
return self.score_func(y, y_pred, **self.kwargs)
else:
y_pred = estimator.predict(X)
return self.score_func(y, y_pred, **self.kwargs)
# Standard regression scores
r2_scorer = Scorer(r2_score)
mse_scorer = Scorer(mean_squared_error, greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = Scorer(accuracy_score)
f1_scorer = Scorer(f1_score)
# Score functions that need decision values
auc_scorer = Scorer(auc_score, greater_is_better=True, needs_threshold=True)
average_precision_scorer = Scorer(average_precision_score,
needs_threshold=True)
precision_scorer = Scorer(precision_score)
recall_scorer = Scorer(recall_score)
# Clustering scores
ari_scorer = Scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer, mse=mse_scorer, accuracy=accuracy_scorer,
f1=f1_scorer, roc_auc=auc_scorer,
average_precision=average_precision_scorer,
precision=precision_scorer, recall=recall_scorer,
ari=ari_scorer)
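# Illustrative sketch (not part of this module's public API): the SCORERS
# registry is what resolves a string passed as ``scoring``. The estimator and
# dataset below are assumptions for demonstration only.
def _demo_scorer_lookup():
    from sklearn.svm import LinearSVC
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = LinearSVC().fit(iris.data, iris.target)
    scorer = SCORERS['accuracy']  # same object GridSearchCV would look up
    return scorer(clf, iris.data, iris.target)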
|
bsd-3-clause
|
gauteh/ibcao_py
|
ibcao/tests/test_proj.py
|
1
|
5566
|
# encoding: utf-8
import common
from common import outdir, TRAVIS
import logging as ll
import unittest as ut
from pyproj import Proj
from ibcao import *
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import os
import os.path
class IbcaoProjTest (ut.TestCase):
def setUp (self):
self.i = IBCAO ()
def tearDown (self):
self.i.close ()
del self.i
def get_np_stere (self):
#np_stere = Proj ("""
#+proj=stere
#+lat_ts=%(lat_ts)f
#+lat_0=%(origin_lat)f
#+lon_0=%(origin_lon)f
#+k_0=%(scale_factor)f
#+x_0=%(x0)f
#+y_0=%(y0)f
#""" % {
#'lat_ts' : self.i.true_scale,
#'origin_lat' : self.i.origin_lat,
#'origin_lon' : self.i.origin_lon,
#'scale_factor' : self.i.scale_factor,
#'x0' : 0,
#'y0' : 0
#})
return self.i.proj
def test_ibcao_grid (self):
ll.info ('test grid coordinates')
xin = self.i.x[::10]
yin = self.i.y[::10]
xx, yy = np.meshgrid (xin, yin)
xx = xx.ravel ()
yy = yy.ravel ()
## make lon, lats
g = ccrs.Geodetic ()
gxy = g.transform_points (self.i.projection, xx, yy)
## do the inverse
xy = self.i.projection.transform_points (g, gxy[:,0], gxy[:,1])
np.testing.assert_array_almost_equal (xx, xy[:,0])
np.testing.assert_array_almost_equal (yy, xy[:,1])
def test_north_pole (self):
ll.info ('test north pole')
g = ccrs.Geodetic ()
# north pole
lon = np.arange (-180, 180, 1)
lat = np.repeat (90, len(lon))
nx = self.i.projection.transform_points (g, lon, lat)
np.testing.assert_array_equal (nx[:,0:2], np.zeros ((nx.shape[0], 2)))
# test inverse conversion
lx = g.transform_points (self.i.projection, nx[:,0], nx[:,1])
#np.testing.assert_array_equal (lon, lx[:,0]) # not unique
np.testing.assert_array_equal (lat, lx[:,1])
def test_corners (self):
ll.info ('test corners')
g = ccrs.Geodetic ()
    # from IBCAO v2 Technical reference
# https://svn.nersc.no/hycom/browser/MSCPROGS/src/Conf_grid/Code/mod_ibcao.F90?rev=187
# (probably from IBCAO v2)
# 26 ! UL -2902500,2902500 (-135, 53:49:1.4687)
# 27 ! UR 2902500, 2902500 (135, 53:49:1.4687)
# 28 ! LL -2902500,-2902500 (-45, 53:49:1.4687)
# 29 ! LR 2902500, -2902500 (45, 53:49:1.4687)
rtol = 1e7
eps = 6 # meters
deps = 0.0001 # degrees
xy = self.i.projection.transform_point (-135, 53.8166 + 0.00040797, g)
np.testing.assert_allclose ((-2902500, 2902500), xy, rtol, eps)
xy = self.i.projection.transform_point (135, 53.8166 + 0.00040797, g)
np.testing.assert_allclose ((2902500, 2902500), xy, rtol, eps)
xy = self.i.projection.transform_point (-45, 53.8166 + 0.00040797, g)
np.testing.assert_allclose ((-2902500, -2902500), xy, rtol, eps)
xy = self.i.projection.transform_point (45, 53.8166 + 0.00040797, g)
np.testing.assert_allclose ((2902500, -2902500), xy, rtol, eps)
# reverse
dx = g.transform_point (-2902500, 2902500, self.i.projection)
np.testing.assert_allclose ((-135, 53.8166 + 0.00040797), dx, rtol, deps)
dx = g.transform_point (2902500, 2902500, self.i.projection)
np.testing.assert_allclose ((135, 53.8166 + 0.00040797), dx, rtol, deps)
dx = g.transform_point (-2902500, -2902500, self.i.projection)
np.testing.assert_allclose ((-45, 53.8166 + 0.00040797), dx, rtol, deps)
dx = g.transform_point (2902500, -2902500, self.i.projection)
np.testing.assert_allclose ((45, 53.8166 + 0.00040797), dx, rtol, deps)
lleft = (self.i.xlim[0], self.i.ylim[0])
uleft = (self.i.xlim[0], self.i.ylim[1])
lright = (self.i.xlim[1], self.i.ylim[0])
uright = (self.i.xlim[1], self.i.ylim[1])
# latitude calculated using this projection, included for regression testing
dlleft = (-45, 53.79955358092116)
duleft = (-135, 53.79955358092116)
dlright = (45, 53.79955358092116)
duright = (135, 53.79955358092116)
# reverse
dx = g.transform_point (*lleft, src_crs = self.i.projection)
np.testing.assert_allclose (dlleft, dx, rtol, deps)
dx = g.transform_point (*uleft, src_crs = self.i.projection)
np.testing.assert_allclose (duleft, dx, rtol, deps)
dx = g.transform_point (*lright, src_crs = self.i.projection)
np.testing.assert_allclose (dlright, dx, rtol, deps)
dx = g.transform_point (*uright, src_crs = self.i.projection)
np.testing.assert_allclose (duright, dx, rtol, deps)
# forward
xy = self.i.projection.transform_point (*dlleft, src_crs = g)
np.testing.assert_allclose (lleft, xy, rtol, eps)
xy = self.i.projection.transform_point (*duleft, src_crs = g)
np.testing.assert_allclose (uleft, xy, rtol, eps)
xy = self.i.projection.transform_point (*dlright, src_crs = g)
np.testing.assert_allclose (lright, xy, rtol, eps)
xy = self.i.projection.transform_point (*duright, src_crs = g)
np.testing.assert_allclose (uright, xy, rtol, eps)
def test_np_stere (self):
ll.info ("testing np stereographic vs our projection")
np_stere = self.get_np_stere ()
lon = np.arange (-180, 180, 1)
lat = np.arange (80, 90, 1)
llon, llat = np.meshgrid (lon, lat)
llon = llon.ravel ()
llat = llat.ravel ()
# convert to np_stere
geodetic = ccrs.Geodetic ()
xy = self.i.projection.transform_points (geodetic, llon, llat)
x = xy[:,0]
y = xy[:,1]
nx, ny = np_stere (llon, llat)
np.testing.assert_allclose (x, nx )
np.testing.assert_allclose (y, ny )
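# --- Editor's note: hedged illustration, not part of the original test file. ---
# The commented-out proj-string in get_np_stere builds an equivalent polar
# stereographic projection directly with pyproj; the forward/inverse round trip
# exercised by the tests looks like this (the parameter values below are
# assumptions, not the exact IBCAO constants):
if __name__ == '__main__':
  import numpy as np
  demo_proj = Proj ('+proj=stere +lat_0=90 +lat_ts=75 +lon_0=0 +x_0=0 +y_0=0')
  demo_lon = np.arange (-170., 180., 10.)
  demo_lat = np.repeat (85., len (demo_lon))
  dx, dy = demo_proj (demo_lon, demo_lat)           # forward: lon/lat -> metres
  rlon, rlat = demo_proj (dx, dy, inverse = True)   # inverse: metres -> lon/lat
  np.testing.assert_allclose (demo_lat, rlat)       # latitude survives the round trip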
|
lgpl-3.0
|
aewhatley/scikit-learn
|
examples/calibration/plot_calibration_multiclass.py
|
272
|
6972
|
"""
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
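# --- Editor's note: hedged sketch, not part of the original example. ---
# Under the hood, method="sigmoid" is Platt scaling: each per-class score s is
# mapped to 1 / (1 + exp(A * s + B)), with A and B fitted on the hold-out set
# (in the real estimator they live in sig_clf.calibrated_classifiers_). The
# coefficients below are made up purely for illustration:
def platt_sigmoid(s, A=-2.0, B=0.0):
    """Map a raw score to a calibrated probability; A < 0 so larger scores
    give larger probabilities."""
    return 1.0 / (1.0 + np.exp(A * s + B))

demo_scores = np.linspace(0.0, 1.0, 5)
print("raw scores         :", demo_scores)
print("sigmoid-calibrated :", platt_sigmoid(demo_scores))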
|
bsd-3-clause
|
philouc/pyhrf
|
python/pyhrf/ui/vb_jde_analyser.py
|
1
|
57369
|
# -*- coding: utf-8 -*-
import numpy as np
from time import time
from pyhrf.ui.analyser_ui import FMRIAnalyser
from pyhrf.ndarray import xndarray
from pyhrf.vbjde.Utils import Main_vbjde_Extension,Main_vbjde_Extension_NoDrifts,Main_vbjde_Python,roc_curve,Main_vbjde_NoDrifts_ParsiMod_Python
from pyhrf.vbjde.Utils import Main_vbjde_NoDrifts_ParsiMod_C_1,Main_vbjde_Extension_ParsiMod_C_1,Main_vbjde_Extension_ParsiMod_C_1_MeanLabels,Main_vbjde_NoDrifts_ParsiMod_C_2,Main_vbjde_NoDrifts_ParsiMod_C_3
from pyhrf.vbjde.Utils import Main_vbjde_Extension_ParsiMod_C_3,Main_vbjde_Extension_ParsiMod_C_3_tau2,Main_vbjde_Extension_ParsiMod_C_3_tau2_FixedTau1
from pyhrf.vbjde.Utils import Main_vbjde_Extension_ParsiMod_C_3_tau2_Cond,Main_vbjde_Extension_ParsiMod_C_3_tau2_Cond_FixedTau1,Main_vbjde_Extension_ParsiMod_C_4
from pyhrf.vbjde.Utils import Main_vbjde_Extension_ParsiMod_C_4_tau2, Main_vbjde_Extension_ParsiMod_C_4_tau2_FixedTau1
from pyhrf.vbjde.Utils import Main_vbjde_Extension_ParsiMod_C_RVM,classify,Main_vbjpde
#from pyhrf.vbjde.Utils import Main_vbjde, Main_vbjde_Fast, Main_vbjde_Extension,Main_vbjde_Extension_NoDrifts
from scipy.linalg import norm
from pyhrf.tools.io import read_volume
#######################
#from pylab import *
#from matplotlib import pyplot
#######################
from pylab import *
import pyhrf
from pyhrf.xmlio import XMLable2
from pyhrf.tools import format_duration
import os.path as op
def change_dim(labels):
'''
Change labels dimension from
(ncond, nclass, nvox)
to
(nclass, ncond, nvox)
'''
ncond = labels.shape[0]
nclass = labels.shape[1]
nvox = labels.shape[2]
newlabels = np.zeros((nclass, ncond, nvox))
for cond in xrange(ncond):
for clas in xrange(nclass):
for vox in xrange(nvox):
newlabels[clas][cond][vox] = labels[cond][clas][vox]
return newlabels
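# Editor's note (hedged addition): the triple loop above is equivalent to a
# single NumPy axis swap, which is much faster on large label arrays.
def change_dim_fast(labels):
    '''Same reordering as change_dim, done with one transpose:
    (ncond, nclass, nvox) -> (nclass, ncond, nvox)
    '''
    return np.transpose(labels, (1, 0, 2))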
DEFAULT_INIT_PARCELLATION = pyhrf.get_data_file_name('subj0_parcellation_for_jpde.nii.gz')
from pyhrf.ui.jde import JDEAnalyser
class JDEVEMAnalyser(JDEAnalyser):
parametersComments = {
'CompMod' : 'running the complete model (without condition selection)',
'ParsiMod' : 'running Parsimonious Model with variable selection',
'definition1' : 'running the first version of parsimonious model (Q -> W)',
'definition2' : 'running the second version of parsimonious model (W -> Q)',
'definition3' : 'running the third version of parsimonious model (\mu_1 -> W)',
'definition4' : 'running the fourth version of parsimonious model (dKL -> W)',
'FixedThresh' : 'running the parsimonious model with a predefined threshold value (tau2)',
'EstimThresh' : 'running the parsimonious model with threshold estimation (tau2)',
'OneThresh' : 'Estimation of one threshold value for all experimental conditions',
'ThreshPerCond' : 'Estimation of a threshold value for each experimental condition (only for definition3)',
'FixedTau1' : 'Tau1 is fixed during analysis and does not change with tau2',
'RVM' : 'running the parsimonious model using the Relevance Vector Machine technique',
'dt' : 'time resolution of the estimated HRF in seconds',
'hrfDuration': 'duration of the HRF in seconds',
'sigmaH': 'variance of the HRF',
'fast': 'running fast VEM with C extensions',
'nbClasses': 'number of classes for the response levels',
'PLOT': 'plotting flag for convergence curves',
'nItMax': 'maximum iteration number',
'nItMin': 'minimum iteration number',
'scale': 'flag for the scaling factor applied to the data fidelity '\
'term during m_h step.\n'
'If scale=False then do nothing, else divide ' \
'the data fidelity term by the number of voxels',
'beta': 'initial value of spatial Potts regularization parameter',
'simulation' : 'indicates whether the run corresponds to a simulation example or not',
'estimateSigmaH': 'estimate or not the HRF variance',
'estimateHRF': 'estimate or not the HRF',
'TrueHrfFlag' : 'If True, HRF will be fixed to the simulated value',
'HrfFilename' : 'True HRF Filename',
'estimateBeta': 'estimate or not the Potts spatial regularization '\
'parameter',
'estimateDrifts': 'Explicit drift estimation (if False then drifts' \
' are marginalized)',
#'driftType': 'type of the drift basis (default=``polynomlial``)', (not used in VEM)',
'outputFile': 'output xml file',
'sigmaH': 'Initial HRF variance',
'contrasts': 'Contrasts to be evaluated' ,
'hyper_prior_sigma_H': 'Parameter of the hyper-prior on sigma_H (if zero, no prior is applied)',
'jpde': "Jointly estimate the parcellation",
'init_parcellation_file': "Parcellation mask to init JPDE",
'estimateW':'estimate or not the relevance variable W',
'tau1' : "Slot of sigmoid function",
'tau2' : "Threshold of sigmoid function",
'alpha_tau2' : "first parameter of gamma prior on tau2",
'lambda_tau2' : "second parameter of gamma prior on tau2",
'S': "Number of MC step Iterations",
#'alpha' : "Prior Bernoulli probability of w=1",
#'alpha_0' : "External field",
'alpha' : "Confidence level in Posterior Probability Map (PPM)",
'gamma' : "activation Threshold in Posterior Probability Map (PPM)",
'estimateLabels' : 'estimate or not the Labels',
'LabelsFilename' : 'True Labels Filename',
'MFapprox' : 'Using of the Mean Field approximation in labels estimation',
'estimateMixtParam' : 'estimate or not the mixture parameters',
'InitVar' : 'Initial value of active and inactive gaussian variances',
'InitMean' : 'Initial value of active gaussian means',
'MiniVemFlag' : 'If True, choose the best initialisation of MixtParam and gamma_h',
'NbItMiniVem' : 'The number of iterations in the Mini VEM algorithm',
}
parametersToShow = ['CompMod','ParsiMod', 'definition1', 'definition2', 'definition3', 'definition4',
'FixedThresh', 'EstimThresh', 'OneThresh', 'ThreshPerCond',
'FixedTau1', 'RVM', 'dt', 'hrfDuration', 'nItMax', 'nItMin',
'estimateSigmaH', 'estimateHRF', 'TrueHrfFlag', 'HrfFilename', 'estimateBeta',
'estimateLabels','LabelsFilename','MFapprox','estimateW',
'estimateDrifts','estimateMixtParam', 'InitVar', 'InitMean', 'outputFile',
'scale', 'nbClasses', 'fast', 'PLOT','sigmaH',
'contrasts','hyper_prior_sigma_H', 'jpde',
'init_parcellation_file', 'tau1', 'tau2', 'alpha_tau2', 'lambda_tau2', 'alpha', 'gamma', 'S',#'alpha','alpha_0',
'simulation','MiniVemFlag','NbItMiniVem']
def __init__(self, hrfDuration=25., sigmaH=0.1, fast=True, CompMod=True, ParsiMod=False,
definition1=False, definition2=False, definition3=False, definition4=False,
FixedThresh=False, EstimThresh=True, OneThresh=True, ThreshPerCond=False,
FixedTau1=True, RVM=False, computeContrast=True, nbClasses=2,
PLOT=False, nItMax=1, nItMin=1, scale=False, beta=1.0 ,
estimateSigmaH=True, estimateHRF=True,TrueHrfFlag=False,HrfFilename='hrf.nii',estimateDrifts=True,
hyper_prior_sigma_H=1000,
estimateSigmaEpsilone=True, dt=.6, estimateBeta=True,
contrasts={'1':'rel1'},
simulation=False,
outputFile='./jde_vem_outputs.xml',
jpde=False,
init_parcellation_file=DEFAULT_INIT_PARCELLATION, estimateW=True, tau1=1.,
tau2=0.1, alpha_tau2=3.0, lambda_tau2=4.0, alpha=0.95, gamma=0.0, S=100,# alpha=0.5, alpha_0=0.5,
estimateLabels=True, LabelsFilename='labels.nii', MFapprox=False,
estimateMixtParam=True,InitVar=0.5,InitMean=2.0,MiniVemFlag=False,NbItMiniVem=5):
JDEAnalyser.__init__(self, outputPrefix='jde_vem_')
XMLable2.__init__(self)
# Important thing : all parameters must have default values
self.dt = dt
#self.driftType = driftType
self.hrfDuration = hrfDuration
self.nbClasses = nbClasses
self.nItMax = nItMax
self.estimateSigmaH = estimateSigmaH
self.scale = scale
self.estimateDrifts = estimateDrifts
self.PLOT = PLOT
self.fast = fast
self.ParsiMod = ParsiMod
self.CompMod = CompMod
self.definition1 = definition1
self.definition2 = definition2
self.definition3 = definition3
self.definition4 = definition4
self.FixedThresh = FixedThresh
self.EstimThresh = EstimThresh
self.OneThresh = OneThresh
self.ThreshPerCond = ThreshPerCond
self.FixedTau1 = FixedTau1
self.RVM = RVM
self.simulation = simulation
self.beta = beta
self.sigmaH = sigmaH
self.estimateHRF = estimateHRF
self.TrueHrfFlag = TrueHrfFlag
self.HrfFilename = HrfFilename
self.estimateSigmaEpsilone = estimateSigmaEpsilone
self.nItMin = nItMin
self.estimateBeta = estimateBeta
self.estimateW = estimateW
self.estimateMixtParam = estimateMixtParam
self.tau1 = tau1
self.tau2 = tau2
self.alpha = alpha
self.gamma = gamma
self.alpha_tau2 = alpha_tau2
self.lambda_tau2 = lambda_tau2
self.S = S
#self.alpha = alpha
#self.alpha_0 = alpha_0
self.estimateLabels = estimateLabels
self.LabelsFilename = LabelsFilename
self.MFapprox = MFapprox
self.InitVar = InitVar
self.InitMean = InitMean
self.MiniVemFlag = MiniVemFlag
self.NbItMiniVem = NbItMiniVem
if contrasts is None:
contrasts = {}
self.contrasts = contrasts
self.computeContrast = computeContrast
self.hyper_prior_sigma_H = hyper_prior_sigma_H
self.jpde = jpde
self.init_parcellation_file = init_parcellation_file
pyhrf.verbose(2, "VEM analyzer:")
pyhrf.verbose(2, " - JPDE: %s" %str(self.jpde))
pyhrf.verbose(2, " - estimate sigma H: %s" %str(self.estimateSigmaH))
pyhrf.verbose(2, " - init sigma H: %f" %self.sigmaH)
pyhrf.verbose(2, " - hyper_prior_sigma_H: %f" %self.hyper_prior_sigma_H)
pyhrf.verbose(2, " - estimate drift: %s" %str(self.estimateDrifts))
#self.contrasts.pop('dummy_example', None)
def analyse_roi(self, roiData):
#roiData is of type FmriRoiData, see pyhrf.core.FmriRoiData
# roiData.bold : numpy array of shape
#print '!! JDEVEMAnalyser !!'
## BOLD has shape (nscans, nvoxels)
#print 'roiData.bold:', roiData.bold.shape
#print roiData.bold
#print 'roiData.tr:'
#print roiData.tr
#nbVoxels = roiData.nbVoxels
#print 'nbVoxels:', nbVoxels
#print '** paradigm **'
#print 'onsets:', roiData.get_joined_onsets()
#print 'bin seq of sampled onsets:'
#print roiData.get_rastered_onset(self.dt)
#roiData.graph #list of neighbours
data = roiData.bold
#print data.shape,roiData.get_nb_vox_in_mask()
#raw_input('')
#noise = roiData.rnoise.data
#snr = 20*log(norm(data-noise) / norm(noise))
#print roiData.onsets
#print "----------------------------"
Onsets = roiData.get_joined_onsets()
#print Onsets
TR = roiData.tr
#K = 2 #number of classes
beta = self.beta
scale = 1#roiData.nbVoxels
#print dir(roiData)
#print roiData.get_nb_vox_in_mask()
nvox = roiData.get_nb_vox_in_mask()
if self.scale:
scale = nvox
#scale = roiData.nbVoxels
#print self.sigmaH
rid = roiData.get_roi_id()
pyhrf.verbose(1,"JDE VEM - roi %d, nvox=%d, nconds=%d, nItMax=%d" \
%(rid, nvox, len(Onsets),self.nItMax))
self.contrasts.pop('dummy_example', None)
cNames = roiData.paradigm.get_stimulus_names()
graph = roiData.get_graph()
t_start = time()
if self.jpde:
#print 'Do the wonderful joint detection estimation !'
#print 'fix subsequent if / else (if needed) ...'
init_parcellation = read_volume(self.init_parcellation_file)[0]
#flatten to align with BOLD data:
init_parcellation = init_parcellation[np.where(roiData.roiMask)]
#print 'init parcellation:'
from pyhrf.parcellation import parcellation_report
pyhrf.verbose(2, parcellation_report(init_parcellation))
#nbParcels = len(np.unique(init_parcellation))
init_parcellation -= init_parcellation.min()
init_parcellation = np.array(init_parcellation) + 1
J = init_parcellation.shape[0]
Pmask0 = init_parcellation
#Pmask0 = np.zeros(J)
#for j in xrange(0,J):
#if ((init_parcellation[j] == 0) or (init_parcellation[j] == 1)):
#Pmask0[j] = 1
#if ((init_parcellation[j] == 2) or (init_parcellation[j] == 3)):
#Pmask0[j] = 2
#print init_parcellation.shape,Pmask0.shape
#raw_input('')
#for j in xrange(0,J):
#if ((init_parcellation[j] == 0) or (init_parcellation[j] == 1)):
#Pmask0[j] = 1
#if ((init_parcellation[j] == 2) or (init_parcellation[j] == 3)):
#Pmask0[j] = 2
#if ((init_parcellation[j] == 4) or (init_parcellation[j] == 5)):
#Pmask0[j] = 3
#if ((init_parcellation[j] == 6) or (init_parcellation[j] == 7)):
#Pmask0[j] = 4
Pmask0 = Pmask0.astype(int)
nbParcels = Pmask0.max()+1
#print nbParcels
#raw_input('')
#print range(nbParcels)
#raw_input('')
#print Pmask0.max(),Pmask0.min()
sigmaH_prior = 0.5*self.sigmaH
beta_par = 0.5
#print self.sigmaH
#print nbClasses
#print init_parcellation
#print init_parcellation.shape
#nrls, estimated_hrf, labels, parcels, EstPmask, EstHRFDict, noiseVar, mu_k, sigma_k, Beta, L, PL,cA,cH,cZ = Main_vbjpde(graph,data,Onsets,self.hrfDuration,init_parcellation,TR,self.dt,self.nbClasses,nbParcels,self.sigmaH,sigmaH_prior,beta,beta_par,self.nItMax,self.nItMin,self.estimateBeta)
#nrls, estimated_hrf, labels, parcels, EstPmask, EstHRFDict, noiseVar, mu_k, sigma_k, Beta, L, PL,cA,cH,cZ,cQ = Main_vbjpde(graph,data,Onsets,self.hrfDuration,Pmask0,TR,self.dt,self.nbClasses,sigmaH_prior,self.sigmaH,beta,beta_par,self.nItMax,self.nItMin,self.estimateBeta)
nrls, estimated_hrf, labels, parcels, EstPmask, EstHRFDict, noiseVar, mu_k, sigma_k, Beta, L, PL,cA,cH,cZ,cQ = Main_vbjpde(graph,data,Onsets,self.hrfDuration,Pmask0,TR,self.dt,self.nbClasses,sigmaH_prior,beta,beta_par,self.nItMax,self.nItMin,self.estimateBeta)
#m_A , m_H, q_Z, q_Q, EstPmask,EstHRFDict, sigma_epsilone, mu_M , sigma_M, Beta, L, PL, cA,cH,cZ,cQ = Main_vbjpde(graph,Y,Onsets,Thrf,Pmask0,TR,dt,K,v_h,beta,beta_Q,nItMax,nItMin,outDir='/home/chaari/Boulot/Data/JPDE/simuls')
#EstPmask = classify(parcels,EstPmask)
#EstPmask += 1
else:
if self.fast:
if self.CompMod:
if self.estimateDrifts:
pyhrf.verbose(2, "fast VEM with drift estimation")
NbIter, nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cAH,cTime,cTimeMean,Sigma_nrls, StimuIndSignal,FreeEnergy = Main_vbjde_Extension(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag,self.HrfFilename,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
#print 'cTimeMean=',cTimeMean
else:
pyhrf.verbose(2, "fast VEM without drift estimation")
#print 'self.contrasts=',self.contrasts
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, CONTRAST, CONTRASTVAR, cA,cH,cZ,cTime,cTimeMean,Sigma_nrls, StimuIndSignal = Main_vbjde_Extension_NoDrifts(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF)
if self.ParsiMod:
if self.estimateDrifts:
if self.definition1:
Tau2,NbIter,nrls,estimated_hrf,labels,noiseVar,mu_k,sigma_k,Beta,L,PL,CONTRAST,CONTRASTVAR,cA,cH,cZ,cW,cAH,w,cTime,cTimeMean,Sigma_nrls,MCMean,StimuIndSignal,FreeEnergy = Main_vbjde_Extension_ParsiMod_C_1(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.estimateW,self.tau1,self.tau2,self.S,self.estimateLabels,self.LabelsFilename,self.InitVar,self.InitMean)
#Tau2,NbIter,nrls,estimated_hrf,labels,noiseVar,mu_k,sigma_k,Beta,L,PL,CONTRAST,CONTRASTVAR,cA,cH,cZ,cW,cAH,w,cTime,cTimeMean,Sigma_nrls,MCMean,StimuIndSignal,FreeEnergy = Main_vbjde_Extension_ParsiMod_C_1_MeanLabels(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.estimateW,self.tau1,self.tau2,self.S,self.estimateLabels,self.LabelsFilename,self.InitVar,self.InitMean)
#if self.definition2: ##### To Do
if self.definition3:
if self.FixedThresh:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 3)) with drift estimation")
NbIter, nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_3(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag,self.HrfFilename,self.estimateW, self.tau1, self.tau2,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
if self.EstimThresh:
if self.OneThresh:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 3 / Estimation of tau2)) with drift estimation")
if not self.FixedTau1:
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_3_tau2(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag, self.HrfFilename,self.estimateW,self.tau1, self.tau2,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
if self.FixedTau1:
NbIter, nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_3_tau2_FixedTau1(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag,self.HrfFilename,self.estimateW, self.tau1, self.tau2,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
if self.ThreshPerCond:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 3 / Estimation of tau2 per cond)) with drift estimation")
if not self.FixedTau1:
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_3_tau2_Cond(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag,self.HrfFilename, self.estimateW,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
if self.FixedTau1:
NbIter, nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_3_tau2_Cond_FixedTau1(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.TrueHrfFlag,self.HrfFilename, self.estimateW,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean,self.MiniVemFlag,self.NbItMiniVem)
if(self.OneThresh==True and self.ThreshPerCond==True):
raise Exception('YOU CHOSE BOTH ONE AND MULTIPLE THRESHOLDS PER CONDITION AT THE SAME TIME :-(' )
if(self.OneThresh==False and self.ThreshPerCond==False):
raise Exception('DO YOU WANT ONE OR MULTIPLE THRESHOLD PER COND ?' )
if(self.FixedThresh==True and self.EstimThresh==True):
raise Exception('YOU CHOSE BOTH A FIXED AND AN ESTIMATED THRESHOLD AT THE SAME TIME :-(' )
if(self.FixedThresh==False and self.EstimThresh==False):
raise Exception('DO YOU WANT A FIXED OR ESTIMATED THRESHOLD ?' )
if self.definition4:
if self.FixedThresh:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 4)) with drift estimation")
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy = Main_vbjde_Extension_ParsiMod_C_4(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.estimateW, self.tau1, self.tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean)
if self.EstimThresh:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 4 / Estimation of tau2)) with drift estimation")
#nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_4_tau2(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.estimateW, self.tau1, self.tau2,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean)
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls, StimuIndSignal, FreeEnergy, Tau2 = Main_vbjde_Extension_ParsiMod_C_4_tau2_FixedTau1(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.estimateW, self.tau1, self.tau2,self.alpha_tau2,self.lambda_tau2,self.S,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean)
if(self.FixedThresh==True and self.EstimThresh==True):
raise Exception('YOU CHOSE BOTH A FIXED AND AN ESTIMATED THRESHOLD AT THE SAME TIME :-(' )
if(self.FixedThresh==False and self.EstimThresh==False):
raise Exception('DO YOU WANT A FIXED OR ESTIMATED THRESHOLD ?' )
if self.RVM:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Relevant Vector Machine)) with drift estimation")
NbIter,nrls,estimated_hrf,labels,noiseVar,mu_k,sigma_k,Beta,L,PL,CONTRAST,CONTRASTVAR,cA,cH,cZ,cW,w,Sigma_w,alpha_RVM,cTime,cTimeMean,Sigma_nrls = Main_vbjde_Extension_ParsiMod_C_RVM(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF,self.estimateW,self.estimateLabels,self.LabelsFilename,self.MFapprox,self.estimateMixtParam,self.InitVar,self.InitMean)
if(self.definition1==False and self.definition3==False and self.definition4==False and self.RVM==False):
raise Exception('YOU DID NOT CHOOSE ANY DEFINITION FOR THE PARSIMONIOUS MODEL :-(' )
if((self.definition3==True and self.definition4==True) or (self.definition3==True and self.RVM==True) or (self.RVM==True and self.definition4==True) or (self.definition3==True and self.definition4==True and self.RVM==True)):
raise Exception('YOU CHOSE SEVERAL DIFFERENT DEFINITIONS FOR THE PARSIMONIOUS MODEL :-( ')
else:
if self.definition1:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 1)) without drift estimation")
NbIter, nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW,cAH, w,cTime,cTimeMean,Sigma_nrls,MCMean, StimuIndSignal, FreeEnergy = Main_vbjde_NoDrifts_ParsiMod_C_1(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.tau1, self.tau2,self.S,self.InitVar,self.InitMean)
if self.definition2:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 2)) without drift estimation")
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW, w,cTime,cTimeMean,Sigma_nrls,MCMean, StimuIndSignal = Main_vbjde_NoDrifts_ParsiMod_C_2(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.tau1, self.tau2,self.S, self.alpha, self.alpha_0)
if self.definition3:
pyhrf.verbose(2, "fast Parsimonious Model VEM ((Definition 3)) without drift estimation")
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, CONTRAST, CONTRASTVAR, cA,cH,cZ,cW, w,cTime,cTimeMean,Sigma_nrls,MCMean, StimuIndSignal,FreeEnergy = Main_vbjde_NoDrifts_ParsiMod_C_3(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT,self.contrasts,self.computeContrast,self.hyper_prior_sigma_H,self.estimateHRF, self.tau1, self.tau2,self.S)
if(self.definition1==False and self.definition2==False and self.definition3==False):
raise Exception('YOU DID NOT CHOOSE ANY DEFINITION FOR THE PARSIMONIOUS MODEL :-( ')
if((self.definition1==True and self.definition2==True) or (self.definition1==True and self.definition3==True) or (self.definition2==True and self.definition3==True) or (self.definition1==True and self.definition2==True and self.definition3==True)):
raise Exception('YOU CHOSE SEVERAL DIFFERENT DEFINITIONS FOR THE PARSIMONIOUS MODEL :-( ')
else:
if self.CompMod:
if self.estimateDrifts:
pyhrf.verbose(2, "not fast VEM")
nrls, estimated_hrf, labels, noiseVar, mu_k, sigma_k, Beta, L, PL = Main_vbjde_Python(graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.nItMax,self.nItMin,self.estimateBeta,self.PLOT)
self.analysis_duration = time() - t_start
pyhrf.verbose(1, 'JDE VEM analysis took: %s' \
%format_duration(self.analysis_duration))
#nrls, estimated_hrf, labels, noiseVar,Hist_sigmaH = Main_vbjde(roiData.graph,data,Onsets,self.hrfDuration,self.nbClasses,TR,beta,self.dt,scale,self.estimateSigmaH,self.sigmaH,self.PLOT,self.nItMax,self.nItMin)
#figure(1)
#plot(estimated_hrf)
#show()
# Pack all outputs within a dict
outputs = {}
hrf_time = np.arange(len(estimated_hrf)) * self.dt
#print hrf_time.shape
#Xit = np.arange(len(Hist_sigmaH))
#print estimated_hrf[0:3].shape
#print Xit.shape
#print hrf_time
#print Xit
#outputs['sigmaH'] = xndarray(Hist_sigmaH, axes_names=['iteration'],
#axes_domains={'iteration'it},value_label="sigmaH")
#outputs['sigmaH'] = xndarray(Hist_sigmaH, axes_names=['iteration'])
if not self.jpde:
#axes_names = ['time', 'voxel']
#axes_domains = {'time' : np.arange(data.shape[0])*TR}
#outputs['StimuIndSignal'] = xndarray(StimuIndSignal.astype(np.float64),
#axes_names=axes_names,
#axes_domains=axes_domains)
#e = np.sqrt((StimuIndSignal.astype(np.float64) - data.astype(np.float64))**2)
#outputs['rmse'] = xndarray(e.mean(0), axes_names=['voxel'], value_label='Rmse')
if ((self.ParsiMod and (self.definition1 or self.definition3 or self.definition4)) or self.CompMod):
axes_names = ['iteration']
axes_domains = {'iteration':np.arange(FreeEnergy.shape[0])}
outputs['FreeEnergy'] = xndarray(FreeEnergy,
axes_names=axes_names,
axes_domains=axes_domains)
if self.jpde:
outputs['hrf'] = xndarray(estimated_hrf, axes_names=['time','voxel'],
axes_domains={'time':hrf_time,'voxel':range(estimated_hrf.shape[1])},
value_label="HRF")
tmp = np.array(EstHRFDict)
D = EstHRFDict[0].shape[0]
tmp2 = np.zeros((nbParcels,D))
for i in xrange(0,nbParcels):
#print EstHRFDict[i]
#print EstHRFDict[i].shape,tmp2[i,:].shape
#raw_input('')
tmp2[i,:] = EstHRFDict[i]
#print tmp
#print tmp.shape
#tmp[3,:] = 0
#tmp[6,:] = 1
outputs['EstHRFDict'] = xndarray(tmp2, axes_names=['class','time'],
axes_domains={'class':range(nbParcels),'time':hrf_time},
value_label="EstHRFDict")
domParcel = {'parcel':range(nbParcels),'voxel':range(parcels.shape[1])}
outputs['parcels'] = xndarray(parcels,value_label="Parcels",
axes_names=['parcel','voxel'],
axes_domains=domParcel)
outputs['Pmask'] = xndarray(EstPmask,value_label="ROI",
axes_names=['voxel'])
outputs['Pmask0'] = xndarray(init_parcellation,value_label="ROI",
axes_names=['voxel'])
else:
outputs['hrf'] = xndarray(estimated_hrf, axes_names=['time'],
axes_domains={'time':hrf_time},
value_label="HRF")
domCondition = {'condition':cNames}
outputs['nrls'] = xndarray(nrls.transpose(),value_label="NRLs",
axes_names=['condition','voxel'],
axes_domains=domCondition)
ad = {'condition':cNames,'condition2':Onsets.keys()}
if not self.jpde:
outputs['Sigma_nrls'] = xndarray(Sigma_nrls,value_label="Sigma_NRLs",
axes_names=['condition','condition2','voxel'],
axes_domains=ad)
outputs['NbIter'] = xndarray(np.array([NbIter]),value_label="NbIter")
### Computing PPM
#from scipy.stats import norm
#NbVox = nrls.shape[0]
#NbCond = nrls.shape[1]
#PPM = np.zeros((NbVox,NbCond),dtype=float)
#PPM_Tresh = np.zeros((NbVox,NbCond),dtype=int)
#Thresh = np.zeros((NbVox),dtype=float)
#NbVoxPPMThresh = np.zeros((NbCond),dtype=int)
#gammaPPM = self.gamma * np.ones(NbVox,dtype=float)
#for m in xrange(NbCond):
#mPPM = nrls[:,m]
#sPPM = np.sqrt(Sigma_nrls[m,m,:])
#Thresh = ( gammaPPM - mPPM ) / sPPM
#PPM[:,m] = 1.0 - norm.cdf(Thresh)
#PPM_Tresh[np.where(PPM[:,m] > self.alpha),m] = 1
#NbVoxPPMThresh[m] = sum(PPM_Tresh[:,m])
#outputs['PPM'] = xndarray(PPM.transpose(),value_label="PPM",
#axes_names=['condition','voxel'],
#axes_domains=domCondition)
#outputs['PPM_Tresh'] = xndarray(PPM_Tresh.transpose(),value_label="PPM_Tresh",
#axes_names=['condition','voxel'],
#axes_domains=domCondition)
#outputs['NbVoxPPMThresh'] = xndarray(NbVoxPPMThresh,value_label="NbVoxPPMThresh",
#axes_names=['condition'],
#axes_domains=domCondition)
outputs['beta'] = xndarray(Beta,value_label="beta",
axes_names=['condition'],
axes_domains=domCondition)
nbc, nbv = len(cNames), nrls.shape[0]
repeatedBeta = np.repeat(Beta, nbv).reshape(nbc, nbv)
outputs['beta_mapped'] = xndarray(repeatedBeta,value_label="beta",
axes_names=['condition','voxel'],
axes_domains=domCondition)
outputs['roi_mask'] = xndarray(np.zeros(nbv)+roiData.get_roi_id(),
value_label="ROI",
axes_names=['voxel'])
if self.ParsiMod:
if not self.RVM:
an = ['condition','class']
ad = {'class':['inactiv','activ'],
'condition': cNames}
outputs['w'] = xndarray(w,value_label="w",
axes_names=an,
axes_domains=ad)
#if (self.EstimThresh==True and self.OneThresh==True):
if (self.definition1==True or self.definition3==True):
outputs['tau2'] = xndarray(np.array([Tau2]),value_label="tau2")
if (self.EstimThresh==True and self.ThreshPerCond==True):
an = ['condition']
ad = {'condition': cNames}
outputs['tau2'] = xndarray(Tau2,value_label="tau2",
axes_names=an,
axes_domains=ad)
if self.RVM:
an = ['condition']
ad = {'condition': cNames}
outputs['w'] = xndarray(w,value_label="w",
axes_names=an,
axes_domains=ad)
outputs['alpha_RVM'] = xndarray(alpha_RVM,value_label="alpha_RVM",
axes_names=an,
axes_domains=ad)
ad = {'condition':cNames,'condition2':Onsets.keys()}
outputs['Sigma_w'] = xndarray(Sigma_w,value_label="Sigma_w",
axes_names=['condition','condition2'],
axes_domains=ad)
#an = ['condition','voxel','S','class']
#ad = {'condition': Onsets.keys(),
#'S': np.arange(MCMean.shape[2]),
#'class':['inactiv','activ']}
#outputs['MCMean'] = xndarray(MCMean,value_label="MCMean",
#axes_names=an,
#axes_domains=ad)
h = estimated_hrf
nrls = nrls.transpose()
nvox = nrls.shape[1]
nbconds = nrls.shape[0]
ah = zeros((h.shape[0], nvox, nbconds))
#for j in xrange(nbconds):
#ah[:,:,j] = repeat(h,nvox).reshape(h.shape[0],nvox) * \
#nrls[j,:]
#ad = {'time':hrf_time, 'condition':roiData.paradigm.get_stimulus_names()}
#outputs['ah'] = xndarray(ah, axes_names=['time','voxel','condition'],
#axes_domains=ad,
#value_label='Delta BOLD')
if 0:
# let's look for label switching
# assume mean closest to 0 corresponds to inactivating class
for m in xrange(roiData.nbConditions):
i_inact = np.argmin(np.abs(mu_k[m,:]))
mu_k[m,i_inact],mu_k[m,0] = mu_k[m,0],mu_k[m,i_inact]
sigma_k[m,i_inact],sigma_k[m,0] = sigma_k[m,0],sigma_k[m,i_inact]
labels[m,i_inact,:],labels[m,0,:] = labels[m,0,:],labels[m,i_inact,:]
mixtp = np.zeros((roiData.nbConditions, self.nbClasses, 2))
mixtp[:, :, 0] = mu_k
mixtp[:, :, 1] = sigma_k**2
an = ['condition','Act_class','component']
ad = {'Act_class':['inactiv','activ'],
'condition': cNames,
'component':['mean','var']}
outputs['mixt_p'] = xndarray(mixtp, axes_names=an, axes_domains=ad)
ad = {'class' : ['inactiv','activ'],
'condition': cNames,
}
outputs['labels'] = xndarray(labels,value_label="Labels",
axes_names=['condition','class','voxel'],
axes_domains=ad)
outputs['noiseVar'] = xndarray(noiseVar,value_label="noiseVar",
axes_names=['voxel'])
if self.estimateDrifts:
outputs['drift_coeff'] = xndarray(L,value_label="Drift",
axes_names=['coeff','voxel'])
outputs['drift'] = xndarray(PL,value_label="Delta BOLD",
axes_names=['time','voxel'])
if not self.jpde and (len(self.contrasts) >0) and self.computeContrast:
#keys = list((self.contrasts[nc]) for nc in self.contrasts)
domContrast = {'contrast':self.contrasts.keys()}
outputs['contrasts'] = xndarray(CONTRAST, value_label="Contrast",
axes_names=['voxel','contrast'],
axes_domains=domContrast)
#print 'contrast output:'
#print outputs['contrasts'].descrip()
c = xndarray(CONTRASTVAR, value_label="Contrasts_Variance",
axes_names=['voxel','contrast'],
axes_domains=domContrast)
outputs['contrasts_variance'] = c
outputs['ncontrasts'] = xndarray(CONTRAST/CONTRASTVAR**.5,
value_label="Normalized Contrast",
axes_names=['voxel','contrast'],
axes_domains=domContrast)
# use 'voxel' to specify the axis where positions are encoded
# -> it will be mapped according to the ROI shape aftewards
#outputs['voxel_stuff'] = xndarray(np.random.randn(nbVoxels),
#axes_names=['voxel'])
#print "Input SNR = " + str(snr)
#print "22211212121"
#print roiData.simulation
#print dir(roiData.simulation)
#axes_names = ['iteration']
################################################################################
axes_names = ['duration']
if not self.jpde:
# Convergence #
#print cZ
#print cH
#print len(cZ),len(cH)
outName = 'Convergence_Labels'
#ad = {'Conv_Criterion':np.arange(len(cZ))}
ax = np.arange(self.nItMax)*cTimeMean
#print cTimeMean
#print '------ check -------------'
#print len(cZ)
#print len(cTime)
#print '------ END check -------------'
ax[:len(cTime)] = cTime
ad = {'duration':ax}
#ad = {'iteration':np.arange(self.nItMax)}
#ad = {'iteration':cTime}
c = np.zeros(self.nItMax) #-.001 #
c[:len(cZ)] = cZ
outputs[outName] = xndarray(c, axes_names=axes_names,
axes_domains=ad,
value_label='Conv_Criterion_Z')
outName = 'Convergence_HRF'
#ad = {'Conv_Criterion':np.arange(len(cH))}
c = np.zeros(self.nItMax) #-.001 #
c[:len(cH)] = cH
outputs[outName] = xndarray(c, axes_names=axes_names,
axes_domains=ad,
value_label='Conv_Criterion_H')
#outName = 'Convergence_HRF'
#axes_names = ['Conv_Criterion']
#ad = {'Conv_Criterion_H':np.arange(len(cH))}
#outputs[outName] = xndarray(np.array(cH),value_label='Conv_Criterion_H')
outName = 'Convergence_NRL'
c = np.zeros(self.nItMax)# -.001 #
c[:len(cA)] = cA
#ad = {'Conv_Criterion':np.arange(len(cA))}
outputs[outName] = xndarray(c, axes_names=axes_names,
axes_domains=ad,
value_label='Conv_Criterion_A')
if self.ParsiMod:
outName = 'Convergence_W'
c = np.zeros(self.nItMax) -.001 #
c[:len(cW)] = cW
#ad = {'Conv_Criterion':np.arange(len(cA))}
outputs[outName] = xndarray(c, axes_names=axes_names,
axes_domains=ad,
value_label='Conv_Criterion_W')
################################################################################
#outputs['labels'] = xndarray(labels,value_label="Labels",
#axes_names=['condition','class','voxel'],
#axes_domains=domCondition)
#raw_input('')
#print dir(roiData)
#print roiData.get_data_files()[0]
#raw_input('')
if self.simulation:
from pyhrf.stats import compute_roc_labels
#print dir(roiData)
#print dir(roiData.simulation)
#print roiData.simulation['labels'][0]
#print roiData.get_data_files()
#raw_input('')
#fn = roiData.get_data_files()[0]
#idx = fn.index('bold')
#fn = fn[0:idx]
#labels_file = fn + 'labels_video.nii'
#labels_file = op.realpath( labels_file )
#labels_vem_video,_ = read_volume( labels_file )
labels_vem_audio = roiData.simulation['labels'][0]
labels_vem_video = roiData.simulation['labels'][1]
#labels_file = fn + 'labels_audio.nii'
#labels_vem_audio,_ = read_volume( labels_file )
#print labels.shape
#raw_input('')
M = labels.shape[0]
K = labels.shape[1]
J = labels.shape[2]
true_labels = np.zeros((K,J))
#print true_labels.shape,labels_vem_audio.shape
true_labels[0,:] = reshape(labels_vem_audio,(J))
true_labels[1,:] = reshape(labels_vem_video,(J))
newlabels = np.reshape(labels[:,1,:],(M,J))
#print newlabels.shape,true_labels.shape
se = []
sp = []
size = prod(labels.shape)
for i in xrange(0,M):
se0,sp0, auc = roc_curve(newlabels[i,:].tolist(),
true_labels[i,:].tolist())
se.append(se0)
sp.append(sp0)
size = min(size,len(sp0))
SE = np.zeros((M,size),dtype=float)
SP = np.zeros((M,size),dtype=float)
for i in xrange(0,M):
tmp = np.array(se[i])
SE[i,:] = tmp[0:size]
tmp = np.array(sp[i])
SP[i,:] = tmp[0:size]
sensData, specData = SE, SP
axes_names = ['condition','1-specificity']
outName = 'ROC_audio'
ad = {'1-specificity':specData[0],'condition':cNames}
outputs[outName] = xndarray(sensData, axes_names=axes_names,
axes_domains=ad,
value_label='sensitivity')
#axes_names = ['1-specificity','condition']
#outName = 'ROC'
#ad = {'1-specificity':specData.transpose(),'condition':Onsets.keys()}
#print ad
##print specData.transpose().shape
##print nrls.transpose().shape
##print Onsets.keys()
#outputs[outName] = xndarray(sensData.transpose(), axes_names=axes_names,
#axes_domains=ad,
#value_label='sensitivity')
#raw_input('')
#domCondition = {'condition':Onsets.keys()}
#outputs['nrls'] = xndarray(nrls.transpose(),value_label="NRLs",
#axes_names=['condition','voxel'],
#axes_domains=domCondition)
#ad = {'Conv_Criterion':np.arange(len(cH))}
#outputs[outName] = xndarray(np.array(cH), axes_names=axes_names,
#axes_domains=ad,
#value_label='Conv_Criterion_H')
m = specData[0].min()
#m2 = specData[0].min()
#print m,m2
#print min(m,m2)
import matplotlib.font_manager as fm
figure(200)
#plot(se[0],sp[0],'--',color='k',linewidth=2.0)
#hold(True)
#plot(se[1],sp[1],color='k',linewidth=2.0)
#legend(('audio','video'))
plot(sensData[0],specData[0],'--',color='k',linewidth=2.0,label='m=1')
hold(True)
plot(sensData[1],specData[1],color='k',linewidth=2.0,label='m=2')
#legend(('audio','video'))
xticks(color = 'k', size = 14,fontweight='bold')
yticks(color = 'k', size = 14,fontweight='bold')
#xlabel('1 - Specificity',fontsize=16,fontweight='bold')
#ylabel('Sensitivity',fontsize=16,fontweight='bold')
prop = fm.FontProperties(size=14,weight='bold')
legend(loc=1,prop=prop)
axis([0., 1., m, 1.02])
#grid(True)
#show()
#savefig('ROC.png')
#raw_input('')
#print true_labels.shape
#print op.realpath(nrl_file)
#nrl_vem_audio = dat[0,:,:,0]
#nrl_vem_video = dat[0,:,:,1]
#raw_input('')
#if roiData.simulation is not None:
#print "999211212121"
#easy_install --prefix=$USRLOCAL -U scikits.learn
#from pyhrf.stats import compute_roc_labels_scikit
from pyhrf.stats import compute_roc_labels
if hasattr(roiData.simulation, 'nrls'):
true_labels = roiData.simulation.nrls.labels
true_nrls = roiData.simulation.nrls.data
elif isinstance(roiData.simulation, dict) and \
roiData.simulation.has_key('labels') and \
roiData.simulation.has_key('nrls') :
true_labels = roiData.simulation['labels']
true_nrls = roiData.simulation['nrls']
else:
raise Exception('Simulation can not be retrieved from %s' \
%str(roiData.simulation))
#se,sp,auc = compute_roc_labels_scikit(labels[:,1,:], true_labels
#print 'labels dimension : ', labels.shape
#print 'true_labels dimension : ', true_labels.shape
#newlabels = change_dim(labels) # Christine
#newlabels = labels
domCondition = {'condition':cNames}
outputs['Truenrls'] = xndarray(true_nrls,value_label="True_nrls",
axes_names=['condition','voxel'],
axes_domains=domCondition)
M = labels.shape[0]
K = labels.shape[1]
J = labels.shape[2]
#
newlabels = np.reshape(labels[:,1,:],(M,J))
#print 'newlabels dimension : ', newlabels.shape
#se,sp, auc = compute_roc_labels(newlabels, true_labels)
#print se.shape,sp.shape,auc
#print newlabels.shape
#print true_labels.shape
#print type(np.array(se))
#raw_input('')
#se = np.array(se)
#sp = np.array(sp)
#from ROC import roc_curve
for i in xrange(0,M):
se0,sp0, auc = roc_curve(newlabels[i,:].tolist(),
true_labels[i,:].tolist())
se.append(se0)
sp.append(sp0)
size = min(size,len(sp0))
SE = np.zeros((M,size),dtype=float)
SP = np.zeros((M,size),dtype=float)
for i in xrange(0,M):
tmp = np.array(se[i])
SE[i,:] = tmp[0:size]
tmp = np.array(sp[i])
SP[i,:] = tmp[0:size]
#########
# noise #
#########
#se,sp, auc = roc_curve(newlabels[0,:].tolist(), true_labels[0,:].tolist())
#SE[0,:] = np.array(se)
#SP[0,:] = np.array(sp)
#print SE.shape, (np.array(se)).shape
#for i in xrange(1,M):
#se,sp, auc = roc_curve(newlabels[i,:].tolist(), true_labels[i,:].tolist())
#print SE.shape, (np.array(se)).shape
#print SP.shape, (np.array(sp)).shape
#SE[i,:] = np.array(se)
#SP[i,:] = np.array(sp)
#raw_input('')
#se,sp, auc = roc_curve(newlabels[0,1,:].tolist(), true_labels[0,:].tolist())
#se = np.array(se)
#sp = np.array(sp)
#se,sp, auc = compute_roc_labels(newlabels, true_labels)
#print se.shape
#print sp.shape
#print auc
#sensData, specData = se, sp
#print sensData.shape,specData[0].shape
#print Onsets.keys()
#raw_input('')
#outputs[outName] = xndarray(sensData, axes_names=axes_names,
#axes_domains=ad,
#value_label='sensitivity')
#axes_names = ['condition']
#outputs['AUROC'] = xndarray(auc, axes_names=axes_names,
#axes_domains={'condition':Onsets.keys()})
#axes_names = ['iteration','Conv_Criterion']
#axes_names = ['Conv_Criterion']
#outName = 'Convergence_NRL'
#ad = {'Conv_Criterion':np.arange(len(cA))}
#outputs[outName] = xndarray(np.array(cA), axes_names=axes_names,
#axes_domains=ad,
#value_label='Conv_Criterion_A')
#outName = 'Convergence_Labels'
#ad = {'Conv_Criterion_Z':np.arange(len(cZ))}
#outputs[outName] = xndarray(np.array(cZ), axes_names=axes_names,
#axes_domains=ad,
#value_label='Conv_Criterion_Z')
#outName = 'Convergence_HRF'
##print "---------------------------------------"
##print outName
##print "---------------------------------------"
#ad = {'Conv_Criterion_H':np.arange(len(cH))}
#outputs[outName] = xndarray(np.array(cH),value_label='Conv_Criterion_H', axes_names=axes_names,
#axes_domains=ad)
d = {'parcel_size':np.array([nvox])}
outputs['analysis_duration'] = xndarray(np.array([self.analysis_duration]),
axes_names=['parcel_size'],
axes_domains=d)
return outputs
# Function to use directly in parallel computation
def run_analysis(**params):
# from pyhrf.ui.vb_jde_analyser import JDEVEMAnalyser
# import pyhrf
pyhrf.verbose.set_verbosity(1)
fdata = params.pop('roi_data')
# print 'doing params:'
# print params
vem_analyser = JDEVEMAnalyser(**params)
return (dict([('ROI',fdata.get_roi_id())] + params.items()), \
vem_analyser.analyse_roi(fdata))
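# Editor's note: hedged usage sketch; 'fdata' and the parameter values below are
# illustrative, not taken from this module.
#
#   meta, results = run_analysis(roi_data=fdata, dt=0.6, nItMax=100,
#                                estimateHRF=True, estimateDrifts=True)
#   # 'meta' holds the ROI id plus the parameters used, 'results' is the dict
#   # of xndarray outputs returned by JDEVEMAnalyser.analyse_roi.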
|
gpl-3.0
|
aflaxman/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
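# --- Editor's note: hedged addition, not part of the original example. ---
# The same ranking is often easier to log as explicit (feature, importance,
# std) triples; this reuses only objects defined above:
ranking = [(int(i), float(importances[i]), float(std[i])) for i in indices]
for feat, imp, err in ranking[:3]:
    print("top feature %d: importance %.3f +/- %.3f" % (feat, imp, err))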
|
bsd-3-clause
|
MagnusS/mirage-bench
|
test-vm-create/read_files.py
|
1
|
2229
|
#!/usr/bin/env python
import sys,glob,re
name_map = {}
name_map["wait_x_xl_create"] = "xl_create"
name_map["wait_x_xl_fast_bridge_create"] = "xl_create_fb"
name_map["wait_x_xl_no_net"] = "xl_create_no_net"
findnum = re.compile(r'\d+')
results = {}
found_keys = []
found_tests = []
for test in glob.glob("wait_x_*"):
if test in name_map:
name = name_map[test]
else:
name = test
results[name] = {}
if name not in found_tests:
found_tests.append(name)
for result in glob.glob(test + "/remote/create_*.log"):
memsize=int(findnum.findall(result.rpartition("/")[2]).pop())
results[name][memsize]=[]
with open(result) as f:
for l in f:
if l.find("real ") >= 0:
r = float(l.split(" ")[1]) # get float
results[name][memsize].append(r) # add to results
if memsize not in found_keys:
found_keys.append(memsize)
print "# Raw results, time in s"
print results
print "# Creating graphs (requires matplotlib)"
import matplotlib.pyplot as plt
import numpy as np
labels = sorted(found_keys)
ind = np.arange(len(labels)) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
ax.set_ylabel('Startup time in seconds')
ax.set_title('xl create time in seconds for different mem sizes')
ax.set_xticks(ind+width)
ax.set_xticklabels( labels )
means = {}
std = {}
bars = []
colors = ['r','b','g']
for test in sorted(found_tests):
means[test] = []
std[test] = []
for l in labels:
means[test].append(np.mean(results[test][l]))
std[test].append(np.std(results[test][l]))
bars.append(ax.bar(ind, means[test], width, color=colors.pop(), yerr=std[test]))
ind = ind + width
ax.legend( bars, sorted(found_tests), loc="best" )
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.04*height, '%.2f'%float(height),
ha='center', va='bottom')
for b in bars:
autolabel(b)
fig="xl_create_graph.pdf"
print "Saving",fig
plt.savefig(fig)
plt.show()
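# Editor's note: hedged suggestion, not part of the original script. The
# name_map lookup at the top of the main loop can be collapsed to one line with
# dict.get, which falls back to the raw test name when no mapping exists:
#   name = name_map.get(test, test)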
|
isc
|
NelisVerhoef/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
323
|
1602
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
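# --- Added sketch (not part of the original exercise) ------------------------
# Report the held-out accuracy for each kernel so the decision-boundary plots
# above can be compared quantitatively; reuses X_train/X_test defined earlier.
for kernel in ('linear', 'rbf', 'poly'):
    clf = svm.SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)
    print("%s kernel: test accuracy = %.3f" % (kernel, clf.score(X_test, y_test)))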
|
bsd-3-clause
|
lakshayg/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
92
|
4535
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
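# Usage sketch (illustrative comment, not part of the original module; the
# DataFrame `df`, its 'target' column and `estimator` are assumed names):
#
#   input_fn = pandas_input_fn(x=df.drop('target', axis=1),
#                              y=df['target'],
#                              batch_size=32,
#                              num_epochs=None,
#                              shuffle=True)
#   estimator.train(input_fn=input_fn, steps=1000)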
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
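# Example (added comment): extract_pandas_data(pd.DataFrame({'a': [1, 2]}))
# returns array([[ 1.], [ 2.]]), while a DataFrame holding string columns
# raises ValueError because only int, float and bool dtypes are accepted.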
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
|
apache-2.0
|
ephes/scikit-learn
|
sklearn/cross_validation.py
|
96
|
58309
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds), the last one has the
complementary.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
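# Worked example (added comment, not in the original source): with n=10,
# test_size=0.25 and train_size=None, n_test = ceil(0.25 * 10) = 3 and
# n_train = 10 - 3 = 7, so _validate_shuffle_split(10, 0.25, None) == (7, 3).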
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
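# Usage sketch (illustrative comment, not part of the original module):
#
#   from sklearn.cross_validation import cross_val_predict
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   preds = cross_val_predict(LogisticRegression(), iris.data, iris.target, cv=5)
#   # preds has shape (150,): one out-of-fold prediction per sample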
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
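# Example (added comment): _check_is_partition(np.array([2, 0, 1]), 3) is True,
# while _check_is_partition(np.array([0, 0, 2]), 3) is False because index 1 is
# never hit.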
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
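# Usage sketch (illustrative comment, not part of the original module):
#
#   from sklearn.cross_validation import cross_val_score
#   from sklearn.svm import SVC
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   scores = cross_val_score(SVC(kernel='linear'), iris.data, iris.target, cv=5)
#   # scores is an array of 5 fold accuracies; scores.mean() summarises them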
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
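# Example (added comment): for a classifier with a binary or multiclass y,
# check_cv(5, X, y, classifier=True) returns StratifiedKFold(y, 5); for other
# targets it falls back to KFold(len(X), 5); an existing cv generator instance
# is returned unchanged.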
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
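# Usage sketch (illustrative comment, not part of the original module):
#
#   from sklearn.cross_validation import permutation_test_score
#   from sklearn.svm import SVC
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   score, perm_scores, pvalue = permutation_test_score(
#       SVC(kernel='linear'), iris.data, iris.target, cv=5, n_permutations=100)
#   # a small pvalue indicates the score is unlikely under permuted labels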
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
|
bsd-3-clause
|
magnusdv/filtus
|
filtus/FiltusQC.py
|
1
|
20840
|
import Tkinter
import Pmw
import os.path
import math
import time
import tkFileDialog
import random
#import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.collections as mplcol
import matplotlib.transforms as mpltransforms
import FiltusWidgets
import FiltusUtils
import FiltusDatabase
class QC(object):
def __init__(self, filtus):
self.parent = filtus.parent
self.filtus = filtus
self.createDialog()
def createDialog(self):
filtus = self.filtus
self.dialog = Pmw.Dialog(self.parent, title='Quality plots', buttons=('Close',), activatecommand=self._prepare, command=self._executeDialogButton, dialogchildsite_pady=5, buttonbox_pady=10)
self.dialog.withdraw()
interior0 = self.dialog.interior()
fr = Tkinter.Frame(interior0) #self.dialog.interior()
fr.columnconfigure(0, weight=1)
fr.rowconfigure(1, weight=1)
fr.grid(row=0, column=0, pady=10, sticky='news')
FiltusWidgets.HelpButton(interior0, filtus=filtus, page="qcplots").grid(row=0, column=0, sticky="ne")
#fr = self.dialog.interior()
Tkinter.Label(fr, text="QUALITY CONTROL PLOTS", font=filtus.titlefont).grid(sticky='news', pady=8)
button_OPTIONS = dict(menubutton_anchor = 'w', menubutton_padx=5, menubutton_pady=1, menubutton_width=9, labelmargin=5, menu_font = filtus.defaultfont)
entry_OPTIONS = dict(labelpos="w", entry_width=5, labelmargin=5, entry_justify='center')
grid_OPTIONS = dict(sticky='news', pady=5, padx=10)
names_group = Pmw.Group(fr, tag_text = 'Select samples')
names_interior = names_group.interior()
names_interior.rowconfigure(0, weight=1)
names = FiltusWidgets.LabeledListBox(names_interior, filtus=filtus, toptext="", width=30,
scrolledlistbox_selectioncommand=self._updateSelStatus,
bottomtext = "Selected: 0", height=min(6, len(filtus.files)))
names.component('scrolledlistbox_label').destroy()
names.component('scrolledlistbox_listbox').bind('<Control-a>', self._selectall_and_update)
names.component('scrolledlistbox_listbox').bind('<KeyRelease-Up>', self._updateSelStatus)
names.component('scrolledlistbox_listbox').bind('<KeyRelease-Down>', self._updateSelStatus)
names.grid(sticky='news')
names_interior.grid(padx=5, pady=5)
self.names = names
comparative_group = Pmw.Group(fr, tag_text = 'Comparative plots')
comparative_interior = comparative_group.interior()
self.comparative_checks = Pmw.RadioSelect(comparative_interior, buttontype = 'checkbutton', orient = 'horizontal')
for name, txt in zip(['gender', 'private', 'heterozygosity'], [' Gender', ' Private variants', ' Heterozygosity']):
px = 15 if name=="private" else 0
self.comparative_checks.add(name, text=txt, padx=px)
self.comparative_checks.invoke(name)
self.save_browser = FiltusWidgets.FileBrowser(comparative_interior, filtus=filtus, label="Write to text:",
checkbutton = True, labelpos='w', browsesticky='se', entryfield_entry_width=15, browsetitle="")
self.save_browser.browsebutton.configure(command = self._browseSave)
self.save_browser.entryfield.configure(command = None)
self.comparative_checks.grid(row=0, column=0, **grid_OPTIONS)
self.save_browser.grid(row=1, column=0, **grid_OPTIONS)
Tkinter.Button(comparative_interior, text="Plot!", command=self._comparativeButtonExecute, bd=3, padx=5, pady=3).grid(row=0, column=1, rowspan=2, padx=30, pady=10)
### scatter plots
scatter_group = Pmw.Group(fr, tag_text = 'Scatter plots')
scatter_interior = scatter_group.interior()
self.scatter_x = FiltusWidgets.OptionMenuExt(scatter_interior, labelpos="w", label_text="X variable:", **button_OPTIONS)
self.scatter_y = FiltusWidgets.OptionMenuExt(scatter_interior, labelpos="w", label_text="Y variable:", **button_OPTIONS)
self.scatter_alpha = Pmw.EntryField(scatter_interior, label_text="Transp.:", value='0.05', validate = {'validator':'real','min':0, 'max':1,'minstrict':0, 'maxstrict':0}, **entry_OPTIONS)
self.scatter_thin = Pmw.EntryField(scatter_interior, label_text="Thin by:", value='1', validate = {'validator':'integer','min':1,'minstrict':0}, **entry_OPTIONS)
self.scatter_x.grid(**grid_OPTIONS)
self.scatter_y.grid(**grid_OPTIONS)
self.scatter_thin.grid(row=0, column=1, **grid_OPTIONS)
self.scatter_alpha.grid(row=1, column=1, **grid_OPTIONS)
Tkinter.Button(scatter_interior, text="Plot!", command=self._scatterButtonExecute, bd=3, padx=5, pady=3).grid(row=0,column=2, rowspan=2, padx=30, pady=10)
### histograms
histo_group = Pmw.Group(fr, tag_text = 'Histogram plots')
histo_interior = histo_group.interior()
#histo_interior.columnconfigure(2, weight=1)
        self.histo_var = FiltusWidgets.OptionMenuExt(histo_interior, labelpos="w", label_text="Variable:", **button_OPTIONS)
self.histo_bins = Pmw.EntryField(histo_interior, label_text="Bins:", value='20', validate = {'validator':'integer','min':1,'minstrict':0}, **entry_OPTIONS)
self.histo_var.grid(**grid_OPTIONS)
self.histo_bins.grid(row=0, column=1, **grid_OPTIONS)
Tkinter.Button(histo_interior, text="Plot!", command=self._histogramButtonExecute, bd=3, padx=5, pady=3).grid(row=0,column=2, padx=30, pady=10)
for g in (names_group, comparative_group, scatter_group, histo_group):
g.interior().columnconfigure(0, weight=1)
g.configure(tag_font = filtus.smallbold)
g.grid(ipady=5, **grid_OPTIONS)
Pmw.alignlabels([self.scatter_x, self.scatter_y, self.histo_var])
Pmw.alignlabels([self.scatter_alpha, self.scatter_thin, self.histo_bins])
def _prepare(self):
files = self.filtus.files
self.names.setlist(['%2d: %s' %(i + 1, os.path.basename(VF.shortName)) for i, VF in enumerate(files)])
self._selectall_and_update()
cols = FiltusUtils.listUnique([head for VF in files for head in VF.columnNames])
for colmenu in [self.scatter_x, self.scatter_y, self.histo_var]:
colmenu.setItems(['']+cols)
def _browseSave(self):
fil = tkFileDialog.asksaveasfilename(initialdir=self.filtus.currentDir, title = "Save plot data as")
if fil:
self.filtus.currentDir = os.path.dirname(fil)
self.save_browser.setvalue(os.path.normpath(fil))
def _updateSelStatus(self, event=None):
self.names.setbottomtext('Selected: %d' %len(self.names.curselection()))
def _selectall_and_update(self, event=None):
self.names.selectall()
self._updateSelStatus()
def clearAll(self):
self.save_browser.deselect()
def _scatterButtonExecute(self):
try:
xcol, ycol = self.scatter_x.getvalue(), self.scatter_y.getvalue()
if xcol=='': raise RuntimeError("X axis column not selected")
if ycol=='': raise RuntimeError("Y axis column not selected")
VFlist = self._validateInput(checkPresence=[xcol, ycol])
alpha = float(self.scatter_alpha.getvalue())
thin = int(self.scatter_thin.getvalue())
scatterPlot(VFlist, xcol, ycol, alpha, thin)
except Exception as e:
FiltusUtils.warningMessage(e)
def _histogramButtonExecute(self):
try:
col = self.histo_var.getvalue()
if col=='': raise RuntimeError("Column variable not selected")
VFlist = self._validateInput(checkPresence=[col])
bins = int(self.histo_bins.getvalue())
histogramPlot(VFlist, col, bins)
except Exception as e:
FiltusUtils.warningMessage(e)
def _comparativeButtonExecute(self):
try:
VFlist = self._validateInput()
plotselect = self.comparative_checks.getvalue()
p, h, g = (str in plotselect for str in ['private','heterozygosity', 'gender'])
writetofile = self.save_browser.getvalue() if self.save_browser.on() else None
QC_3plots(VFlist, private=p, heterozygosity=h, gender=g, writetofile=writetofile, save=None)
except Exception as e:
FiltusUtils.warningMessage(e)
def _executeDialogButton(self, button):
if button is None or button == 'Close':
self.dialog.deactivate()
return
def _validateInput(self, checkPresence=[]):
VFlist = self.filtus.checkLoadedSamples(select="all", VF=1, filtered=1)
VFlist = [VFlist[int(i)] for i in self.names.curselection()]
if not VFlist:
raise RuntimeError("No samples selected")
for col in checkPresence:
if any(col not in VF.columnNames for VF in VFlist):
raise ValueError("The column '%s' does not appear in all the selected samples" %col)
return VFlist
def setPlotParams(ax, title, xlab, ylab, xlim=None, ylim=None):
ax.set_title(title, size="medium")
ax.set_xlabel(xlab, size="smaller")
ax.set_ylabel(ylab, size="smaller")
ax.tick_params(axis='both', labelsize="smaller")
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
orig_xt = ax.get_xticks()
ax.margins(0.05, 0.05, tight=True)
if min(orig_xt) == 0 and max(orig_xt) == 1:
new_xt = orig_xt
else:
xt = ax.get_xticks()
new_xt = sorted(set(math.floor(x) for x in xt))
if len(new_xt) < 4: new_xt = xt # undo
if len(new_xt) > 5: new_xt = new_xt[1::2]
ax.xaxis.set_ticks(new_xt)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.set_aspect(float((xlim[1]-xlim[0])/(ylim[1]-ylim[0]))) # float to avoid matplotlib (or numpy?) unicodewarning
def scatterPlot(VFlist, xcol, ycol, alpha, thin, NA_vals = ('', 'NA', '.', '-'), GTlegend="upper left", save=None, show=True):
N = len(VFlist)
nrow = int(math.sqrt(N))
ncol = math.ceil(float(N)/nrow)
fig = plt.figure(figsize=(3.5*ncol, 3.5*nrow))
for i, VF in enumerate(VFlist):
getData = VF.columnGetter(xcol, ycol)
GTnum, keep00 = VF.GTnum(), VF.keep00
data = [(getData(v), GTnum(v)) for v in VF.variants[::thin]]
try:
floatdata = [(float(x), float(y), gt) for (x,y), gt in data if x not in NA_vals and y not in NA_vals]
except ValueError:
raise ValueError("Cannot plot columns with non-numerical values.\nTip: Use filters to remove non-numerical values before plotting.")
ax = fig.add_subplot(nrow, ncol, i+1, aspect=1)
for num, col in zip([1,0,2], ['g','b','r']):
if num==0 and not keep00: continue
gt_subset = [(x,y) for x,y,gt in floatdata if gt==num]
if gt_subset:
X, Y = zip(*gt_subset)
ax.plot(X, Y, alpha=alpha, color=col, ls='none', marker='o')
## GT legend
txt = ['ref/ref', 'ref/alt', 'alt/alt'][not keep00:]
fmt = ['bo', 'go', 'ro'][not keep00:]
axes = [ax.plot([], [], y)[0] for y in fmt]
ax.legend(axes, txt, numpoints=1, fontsize="small", fancybox=True, handletextpad=0.2, loc=GTlegend)
setPlotParams(ax, VF.shortName, xcol, ycol)
showAndSave(fig, show=show, save=save)
return fig
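# Colour key for the scatter panels above: heterozygous calls are drawn in green,
# homozygous reference in blue (and only when keep00 is set), homozygous alternate
# in red; the transparency (alpha) and thinning values come from the dialog fields.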
def histogramPlot(VFlist, column, bins, NA_vals = ('', 'NA', '.', '-'), save=None, show=True):
N = len(VFlist)
nrow = int(math.sqrt(N))
ncol = math.ceil(float(N)/nrow)
fig = plt.figure(figsize=(3.5*ncol, 3.5*nrow))
for i, VF in enumerate(VFlist):
getData = VF.columnGetter(column)
stringdat = [getData(v) for v in VF.variants]
floatdat = [float(x) for x in stringdat if x not in NA_vals]
ax = fig.add_subplot(nrow, ncol, i+1, aspect=1)
ax.hist(floatdat, bins=bins, color='b')
setPlotParams(ax, VF.shortName, column, '')
showAndSave(fig, show=show, save=save)
return fig
def homPlot(pos, obs, scores, freqs, title='', segs=None, save=None, show=True):
fig = plt.figure(figsize=(14, 6))
ax = fig.add_subplot(1, 1, 1)
obs_jit = [0.4*gt + 0.18*fr + random.random()*0.02 for gt,fr in zip(obs, freqs)]
ax.plot(pos, obs_jit, 'bo')
ax.plot(pos, scores, 'r-')
ax.add_collection(mplcol.BrokenBarHCollection([(s[0], s[2]*1.0e6) for s in segs], (0,1), facecolor='green', alpha=0.2))
ax.set_title(title, size="larger")
ax.set_xlabel("Chromosomal position (bp)", size="medium")
ax.set_ylabel("Posterior probabilities", size="medium")
ax.set_ylim([-0.05, 1.02])
ax.get_yaxis().tick_left()
trans = mpltransforms.blended_transform_factory(ax.transAxes, ax.transData)
ax.text(1.02, .9, '1/1', color="blue", size=12, verticalalignment='center', transform=trans)
ax.text(1.02, .5, '0/1', color="blue", size=12, verticalalignment='center', transform=trans)
ax.text(1.02, .1, '0/0', color="blue", size=12, verticalalignment='center', transform=trans)
ax.margins(0.01, 0.15, tight=True)
showAndSave(fig, tight=False, show=show, save=save)
return fig
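# Jitter scheme in homPlot above: each genotype g in {0, 1, 2} is drawn inside the
# band [0.4*g, 0.4*g + 0.2], with the variant frequency (presumably in [0, 1]) plus
# a little noise spread within the band, so the dots line up with the 0/0, 0/1 and
# 1/1 labels while the red curve traces the posterior scores.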
def homPlotSimple(pos, obs, title='', segs=None, save=None, show=True): # plink plot, without scores and frequencies
fig = plt.figure(figsize=(14, 6))
ax = fig.add_subplot(1, 1, 1)
obs_jit = [0.4*gt + random.random()/5 for gt in obs]
ax.plot(pos, obs_jit, 'bo')
ax.add_collection(mplcol.BrokenBarHCollection([(s[0], s[2]*1.0e6) for s in segs], (0,1), facecolor='green', alpha=0.2))
ax.set_title(title, size="larger")
ax.set_xlabel("Chromosomal position (bp)", size="medium")
ax.set_ylim([-0.02, 1.02])
ax.get_yaxis().set_ticks([])
trans = mpltransforms.blended_transform_factory(ax.transAxes, ax.transData)
ax.text(1.02, .9, '1/1', color="blue", size=12, verticalalignment='center', transform=trans)
ax.text(1.02, .5, '0/1', color="blue", size=12, verticalalignment='center', transform=trans)
ax.text(1.02, .1, '0/0', color="blue", size=12, verticalalignment='center', transform=trans)
ax.margins(0.01, 0.15, tight=True)
showAndSave(fig, tight=False, show=show, save=save)
return fig
def QC_3plots(VFlist, gender=True, private=True, heterozygosity=True, writetofile=None, save=None, show=True):
if private + heterozygosity + gender == 0: return None
N = len(VFlist)
add_legend = N < 13
Nplots = private + heterozygosity + gender + add_legend
nrow = int(math.sqrt(Nplots))
ncol = math.ceil(float(Nplots)/nrow)
fig = plt.figure(figsize=(3.5*ncol, 3.5*nrow))
if add_legend:
markers = ['D','^','*','d','<','s','p','v','D','^','*','d']
sizes = [6,8,8,8,8,8,8,8,6,8,8,8]
cols = ['red', 'lime', 'cyan', 'brown', 'magenta', 'gold', 'pink', 'black', 'purple', 'gray', 'silver', 'green']
else:
markers, sizes, cols = ['o']*N, [6]*N, ['red']*N
DB = FiltusDatabase.VariantDatabase.buildFromSamples(VFlist, "Extended")
db_str = DB.variants
if writetofile:
sep = '\t'
text_out = FiltusUtils.composeMeta(VFlist, analysis="QC PLOTS")
plotnr = 0
if gender:
plotnr += 1
ax_sex = fig.add_subplot(nrow, ncol, plotnr, aspect=1)
XminusPAR = FiltusUtils.XminusPAR
db_X_raw = [x[6:] for x in db_str if XminusPAR(x[:2])]
if db_X_raw:
db_X = zip(*db_X_raw)
totals_X = [sum(map(bool, x)) for x in db_X]
hets = [sum(g == 1 for g in sample)*100.0/tot if tot>0 else 0 for sample, tot in zip(db_X, totals_X)]
for i in range(N):
ax_sex.plot(totals_X[i], hets[i], marker=markers[i], color=cols[i], markersize=sizes[i])
else:
totals_X, hets = [0]*N, [0]*N
#print "Empty gender estimation plot.\n\nNo variants found on X \ PAR."
setPlotParams(ax_sex, "Gender estimation", 'Variants on X (-PAR)', 'Heterozygosity (%)', ylim=(0,100))
ax_sex.axhspan(0, 15, facecolor='blue', alpha=0.2)
ax_sex.axhspan(15, 35, facecolor='red', alpha=0.2)
ax_sex.axhspan(35, 100, facecolor='green', alpha=0.2)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax_sex.text(0.05, 0.95, "FEMALE", transform=ax_sex.transAxes, fontsize="x-small", va="top", ha='left', bbox=props)
ax_sex.text(0.05, 0.27, "? ", transform=ax_sex.transAxes, fontsize="x-small", va="center", ha='left', bbox=props)
ax_sex.text(0.95, 0.05, "MALE", transform=ax_sex.transAxes, fontsize="x-small", va="bottom", ha='right', bbox=props)
if writetofile:
headers = sep.join(['Sample', 'Variants on X (-PAR)', 'Heterozygosity (%)', 'Gender'])
genders = ['?' if tot==0 or 15<h<35 else 'Male' if h<=15 else 'Female' for tot, h in zip(totals_X, hets)]
points = [sep.join([s, str(x), '%.2f'%y, g]) for s,x,y,g in zip(DB.sampleNames, totals_X, hets, genders)]
text_out += "***Plot: Gender estimation***\n" + headers + '\n' + '\n'.join(points) + '\n\n'
if private:
plotnr += 1
ax_priv = fig.add_subplot(nrow, ncol, plotnr, aspect=1)
db_nonz = [map(bool, x) for x in zip(*db_str)[6:]]
totals_all = map(sum, db_nonz)
if max(totals_all)>2000:
totals_all = [tot/1000.0 for tot in totals_all]
xlab = '# variants/1000'
else: xlab = '# variants'
rowSums_nonz = map(sum, zip(*db_nonz))
priv_ind = [i for i in range(len(rowSums_nonz)) if rowSums_nonz[i]==1]
privates = [sum(sampl[i] for i in priv_ind) for sampl in db_nonz]
for i in range(N):
ax_priv.plot(totals_all[i], privates[i], marker=markers[i], color=cols[i], markersize=sizes[i])
setPlotParams(ax_priv, "Private variants", xlab, 'Private')
if writetofile:
headers = sep.join(['Sample', xlab, 'Private'])
points = [sep.join([s, str(x), str(y)]) for s,x,y in zip(DB.sampleNames, totals_all, privates)]
text_out += "***Plot: Private variants***\n" + headers + '\n' + '\n'.join(points) + '\n\n'
if heterozygosity:
plotnr += 1
ax_het = fig.add_subplot(nrow, ncol, plotnr, aspect=1)
chromInt = FiltusUtils.chromInt
db_AUT = zip(*[x[6:] for x in db_str if chromInt(x[0]) < 23])
if not db_AUT:
raise RuntimeError("Empty heterozygosity plot.\n\nNo autosomal variants found.")
totals_AUT = [sum(map(bool, x)) for x in db_AUT]
hets = [sum(g == 1 for g in sample)*100.0/tot if tot>0 else 0 for sample, tot in zip(db_AUT, totals_AUT)]
if max(totals_AUT) > 2000:
totals_AUT = [tot/1000.0 for tot in totals_AUT]
xlab = '# autosomal variants/1000'
else: xlab = '# autosomal variants'
for i in range(N):
ax_het.plot(totals_AUT[i], hets[i], marker=markers[i], color=cols[i], markersize=sizes[i])
setPlotParams(ax_het, "Heterozygosity", xlab, 'Heterozygosity (%)', ylim=(-5,105))
if writetofile:
headers = sep.join(['Sample', 'A'+xlab[3:], 'Heterozygosity (%)'])
points = [sep.join([s, str(x), '%.2f'%y]) for s,x,y in zip(DB.sampleNames, totals_AUT, hets)]
text_out += "***Plot: Heterozygosity***\n" + headers + '\n' + '\n'.join(points) + '\n'
if writetofile:
with open(writetofile, 'w') as out:
out.write(text_out)
if add_legend:
plotnr +=1
ax_legend = fig.add_subplot(nrow, ncol, plotnr, aspect=1)
simplenames = [VF.shortName for VF in VFlist]
ax_legend.set_frame_on(False)
ax_legend.axis('off')
for i in range(N):
ax_legend.plot([], marker=markers[i], color=cols[i], markersize=sizes[i], label=simplenames[i], ls='None')
ax_legend.legend(loc=2, numpoints=1, fontsize='small', frameon=False, title="Legend")
showAndSave(fig, tight=True, show=show, save=save)
return fig
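# Reading the gender panel above: a sample with, say, 40 % heterozygosity on
# X minus PAR falls in the green band and is classified as Female; one at 10 %
# falls in the blue band (Male); values strictly between 15 % and 35 %, or
# samples with no X variants at all, are reported as '?'.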
def showAndSave(fig, tight=True, show=True, save=None):
try:
if tight: fig.set_tight_layout(True)
if show: fig.show()
if save: fig.savefig(save)
except ValueError as e:
raise ValueError("Could not show plot.\n\nInternal error message:%s"%e)
|
gpl-2.0
|
simon-pepin/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
238
|
2414
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
bsd-3-clause
|
vortex-ape/scikit-learn
|
sklearn/tests/test_multioutput.py
|
4
|
19442
|
from __future__ import division
import pytest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.exceptions import NotFittedError
from sklearn.utils import cpu_count
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import jaccard_similarity_score, mean_squared_error
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import ClassifierChain, RegressorChain
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multioutput import MultiOutputRegressor
from sklearn.svm import LinearSVC
from sklearn.base import ClassifierMixin
from sklearn.utils import shuffle
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:, n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
half_index = 25
for n in range(3):
sgr = SGDRegressor(random_state=0, max_iter=5)
sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
references[:, n] = sgr.predict(X_test)
sgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5))
sgr.partial_fit(X_train[:half_index], y_train[:half_index])
sgr.partial_fit(X_train[half_index:], y_train[half_index:])
y_pred = sgr.predict(X_test)
assert_almost_equal(references, y_pred)
assert_false(hasattr(MultiOutputRegressor(Lasso), 'partial_fit'))
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test = X[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test),
rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
# weighted regressor
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5))
rgr_w.partial_fit(X, y, w)
# weighted with different weights
w = [2., 2.]
rgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5))
rgr.partial_fit(X, y, w)
assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1, max_iter=5)
mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
mor.partial_fit(X, y, classes)
est1 = mor.estimators_[0]
mor.partial_fit(X, y)
est2 = mor.estimators_[0]
if cpu_count() > 1:
# parallelism requires this to be the case for a sane implementation
assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict
sgd_linear_clf = SGDClassifier(loss='log', random_state=1, max_iter=5)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
# train the multi_target_linear and also get the predictions.
half_index = X.shape[0] // 2
multi_target_linear.partial_fit(
X[:half_index], y[:half_index], classes=classes)
first_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), first_predictions.shape)
multi_target_linear.partial_fit(X[half_index:], y[half_index:])
second_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), second_predictions.shape)
# train the linear classification with each column and assert that
# predictions are equal after first partial_fit and second partial_fit
for i in range(3):
# create a clone with the same state
sgd_linear_clf = clone(sgd_linear_clf)
sgd_linear_clf.partial_fit(
X[:half_index], y[:half_index, i], classes=classes[i])
assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_multi_output_classification_partial_fit_no_first_classes_exception():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1, max_iter=5)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
assert_raises_regex(ValueError, "classes must be passed on the first call "
"to partial_fit.",
multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert len(predict_proba) == n_outputs
for class_probabilities in predict_proba:
assert_equal((n_samples, n_classes), class_probabilities.shape)
assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
    # train the multiclass svc with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
seed = 542
# make test deterministic
rng = np.random.RandomState(seed)
# random features
X = rng.normal(size=(5, 5))
# random labels
y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes
y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes
Y = np.concatenate([y1, y2], axis=1)
clf = MultiOutputClassifier(LogisticRegression(
multi_class='ovr', solver='liblinear', random_state=seed))
clf.fit(X, Y)
y_result = clf.predict_proba(X)
y_actual = [np.array([[0.23481764, 0.76518236],
[0.67196072, 0.32803928],
[0.54681448, 0.45318552],
[0.34883923, 0.65116077],
[0.73687069, 0.26312931]]),
np.array([[0.5171785, 0.23878628, 0.24403522],
[0.22141451, 0.64102704, 0.13755846],
[0.16751315, 0.18256843, 0.64991843],
[0.27357372, 0.55201592, 0.17441036],
[0.65745193, 0.26062899, 0.08191907]])]
for i in range(len(y_actual)):
assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
yw = [[3, 2], [2, 3], [3, 2]]
w = np.asarray([2., 1., 1.])
sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5)
clf_w = MultiOutputClassifier(sgd_linear_clf)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
y = [[3, 2], [3, 2], [2, 3], [3, 2]]
sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5)
clf = MultiOutputClassifier(sgd_linear_clf)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5]]
assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
    # NotFittedError when fit is not done but score, predict and
    # predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
# ValueError when y is continuous
assert_raise_message(ValueError, "Unknown label type", moc.fit, X, X[:, 1])
def generate_multilabel_dataset_with_correlations():
    # Generate a multilabel data set from a multiclass dataset by representing
    # the integer number of the original class using a binary encoding.
X, y = make_classification(n_samples=1000,
n_features=100,
n_classes=16,
n_informative=10,
random_state=0)
Y_multi = np.array([[int(yyy) for yyy in format(yy, '#06b')[2:]]
for yy in y])
return X, Y_multi
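# Worked example of the encoding above: format(5, '#06b') == '0b0101', so class 5
# becomes the label row [0, 1, 0, 1]; with n_classes=16 every row has exactly
# four binary digits, which is what makes the labels correlated.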
def test_classifier_chain_fit_and_predict_with_linear_svc():
# Fit classifier chain and verify predict performance using LinearSVC
X, Y = generate_multilabel_dataset_with_correlations()
classifier_chain = ClassifierChain(LinearSVC())
classifier_chain.fit(X, Y)
Y_pred = classifier_chain.predict(X)
assert_equal(Y_pred.shape, Y.shape)
Y_decision = classifier_chain.decision_function(X)
Y_binary = (Y_decision >= 0)
assert_array_equal(Y_binary, Y_pred)
assert not hasattr(classifier_chain, 'predict_proba')
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_classifier_chain_fit_and_predict_with_sparse_data():
# Fit classifier chain with sparse data
X, Y = generate_multilabel_dataset_with_correlations()
X_sparse = sp.csr_matrix(X)
classifier_chain = ClassifierChain(LogisticRegression())
classifier_chain.fit(X_sparse, Y)
Y_pred_sparse = classifier_chain.predict(X_sparse)
classifier_chain = ClassifierChain(LogisticRegression())
classifier_chain.fit(X, Y)
Y_pred_dense = classifier_chain.predict(X)
assert_array_equal(Y_pred_sparse, Y_pred_dense)
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_classifier_chain_vs_independent_models():
# Verify that an ensemble of classifier chains (each of length
# N) can achieve a higher Jaccard similarity score than N independent
# models
X, Y = generate_multilabel_dataset_with_correlations()
X_train = X[:600, :]
X_test = X[600:, :]
Y_train = Y[:600, :]
Y_test = Y[600:, :]
ovr = OneVsRestClassifier(LogisticRegression())
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
chain = ClassifierChain(LogisticRegression())
chain.fit(X_train, Y_train)
Y_pred_chain = chain.predict(X_test)
assert_greater(jaccard_similarity_score(Y_test, Y_pred_chain),
jaccard_similarity_score(Y_test, Y_pred_ovr))
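# The chain can beat the independent one-vs-rest models here because the labels
# are correlated by construction (they are binary digits of a single underlying
# class) and each link of the chain sees the previously predicted labels as
# extra input features.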
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_base_chain_fit_and_predict():
# Fit base chain and verify predict performance
X, Y = generate_multilabel_dataset_with_correlations()
chains = [RegressorChain(Ridge()),
ClassifierChain(LogisticRegression())]
for chain in chains:
chain.fit(X, Y)
Y_pred = chain.predict(X)
assert_equal(Y_pred.shape, Y.shape)
assert_equal([c.coef_.size for c in chain.estimators_],
list(range(X.shape[1], X.shape[1] + Y.shape[1])))
Y_prob = chains[1].predict_proba(X)
Y_binary = (Y_prob >= .5)
assert_array_equal(Y_binary, Y_pred)
assert isinstance(chains[1], ClassifierMixin)
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_base_chain_fit_and_predict_with_sparse_data_and_cv():
# Fit base chain with sparse data cross_val_predict
X, Y = generate_multilabel_dataset_with_correlations()
X_sparse = sp.csr_matrix(X)
base_chains = [ClassifierChain(LogisticRegression(), cv=3),
RegressorChain(Ridge(), cv=3)]
for chain in base_chains:
chain.fit(X_sparse, Y)
Y_pred = chain.predict(X_sparse)
assert_equal(Y_pred.shape, Y.shape)
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_base_chain_random_order():
# Fit base chain with random order
X, Y = generate_multilabel_dataset_with_correlations()
for chain in [ClassifierChain(LogisticRegression()),
RegressorChain(Ridge())]:
chain_random = clone(chain).set_params(order='random', random_state=42)
chain_random.fit(X, Y)
chain_fixed = clone(chain).set_params(order=chain_random.order_)
chain_fixed.fit(X, Y)
assert_array_equal(chain_fixed.order_, chain_random.order_)
assert_not_equal(list(chain_random.order), list(range(4)))
assert_equal(len(chain_random.order_), 4)
assert_equal(len(set(chain_random.order_)), 4)
# Randomly ordered chain should behave identically to a fixed order
# chain with the same order.
for est1, est2 in zip(chain_random.estimators_,
chain_fixed.estimators_):
assert_array_almost_equal(est1.coef_, est2.coef_)
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_base_chain_crossval_fit_and_predict():
# Fit chain with cross_val_predict and verify predict
# performance
X, Y = generate_multilabel_dataset_with_correlations()
for chain in [ClassifierChain(LogisticRegression()),
RegressorChain(Ridge())]:
chain.fit(X, Y)
chain_cv = clone(chain).set_params(cv=3)
chain_cv.fit(X, Y)
Y_pred_cv = chain_cv.predict(X)
Y_pred = chain.predict(X)
assert Y_pred_cv.shape == Y_pred.shape
assert not np.all(Y_pred == Y_pred_cv)
if isinstance(chain, ClassifierChain):
assert jaccard_similarity_score(Y, Y_pred_cv) > .4
else:
assert mean_squared_error(Y, Y_pred_cv) < .25
|
bsd-3-clause
|
CG-F16-7-Rutgers/steersuite-rutgers
|
steerstats/tools/plotting/plotBarMetricOptEntropy.py
|
8
|
1518
|
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
N = 2
scale = 100.0
ppr = np.array([[3.41784, 3.403440],
[1.91507, 2.271680]])
ppr = np.divide(ppr[0] - ppr[1], ppr[0]) * scale
orca = np.array([[2.117200, 2.953220],
[0.628748, 2.203690]])
orca = np.divide(orca[0] - orca[1], orca[0]) * scale
sf = np.array([[3.741280, 3.620520],
[3.098120, 2.757230]])
sf = np.divide(sf[0] - sf[1], sf[0]) * scale
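# Each of ppr, orca and sf now holds the percent reduction from its first row to
# its second, (row0 - row1) / row0 * 100; e.g. the first PPR entry drops from
# 3.41784 to 1.91507, which is roughly a 44 % reduction.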
"""
max = np.amax(ppr)
ppr = ppr / max
orca = orca / max
sf = sf / max
"""
# menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
adjust=0.5
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ppr, width, color='r')
# womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, orca, width, color='b')
rects3 = ax.bar(ind+(width*2.0), sf, width, color='g')
# add some text for labels, title and axes ticks
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
ax.set_xticks(ind+(width*1.5))
# ax.set_xticklabels( ('d', 'q^d', 'q^t', 'q^e', 'e', 'u') )
ax.set_xticklabels( ('', '', '', '', '', '') )
ax.legend( (rects1[0], rects2[0], rects3[0]), ('PPR', 'ORCA', 'SF', ) )
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
# autolabel(rects1)
# autolabel(rects2)
# autolabel(rects3)
plt.show()
|
gpl-3.0
|
bmazin/SDR
|
Projects/SuperMatchedFilters/capture.py
|
2
|
8803
|
from matplotlib import rcParams, rc
import numpy as np
import sys
from fitFunctions import gaussian
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
def calcThreshold(phase,Nsigma=2.5,nSamples=5000):
n,bins= np.histogram(phase[:nSamples],bins=100)
n = np.array(n,dtype='float32')/np.sum(n)
tot = np.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = np.sum(n[:i])
med = bins[np.abs(tot-0.5).argmin()]
thresh = bins[np.abs(tot-0.05).argmin()]
threshold = int(med-Nsigma*abs(med-thresh))
return threshold
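def _thresholdDemo():
    #illustrative sketch only (synthetic data, not used by the script below):
    #on a zero-mean Gaussian "quiet" trace the returned trigger level lies well
    #below the median, so only clear negative-going pulse dips can cross it
    noiseSample = np.random.normal(0., 2., 5000)
    return calcThreshold(noiseSample, Nsigma=2.5)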
def oldBaseFilter(data,alpha=0.08):
#construct IIR
alpha = 0.08
numCoeffs = np.zeros(31)
numCoeffs[30] = alpha
denomCoeffs = np.zeros(11)
denomCoeffs[0] = 1
denomCoeffs[10] = -(1-alpha)
baselines = scipy.signal.lfilter(numCoeffs,denomCoeffs,data)
return baselines
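#the filter above realises y[n] = (1-alpha)*y[n-10] + alpha*x[n-30], i.e. an
#exponential moving-average baseline tracker whose input is delayed by 30
#samples and whose feedback is taken 10 samples back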
def detectPulses(sample,threshold,baselines,deadtime=10):
#deadtime in ticks (us)
data = np.array(sample)
#threshold = calcThreshold(data[0:2000])
dataSubBase = data - baselines
derivative = np.diff(data)
peakHeights = []
t = 0
negDeriv = derivative <= 0
posDeriv = np.logical_not(negDeriv)
print np.shape(derivative)
print np.shape(data)
print np.shape(negDeriv)
nNegDerivChecks = 10
lenience = 1
triggerBooleans = dataSubBase[nNegDerivChecks:-2] < threshold
negDerivChecksSum = np.zeros(len(negDeriv[0:-nNegDerivChecks-1]))
for i in range(nNegDerivChecks):
negDerivChecksSum += negDeriv[i:i-nNegDerivChecks-1]
peakCondition0 = negDerivChecksSum >= nNegDerivChecks-lenience
peakCondition1 = np.logical_and(posDeriv[nNegDerivChecks:-1],posDeriv[nNegDerivChecks+1:])
peakCondition01 = np.logical_and(peakCondition0,peakCondition1)
peakBooleans = np.logical_and(triggerBooleans,peakCondition01)
try:
peakIndices = np.where(peakBooleans)[0]+nNegDerivChecks
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
except IndexError:
return np.array([]),np.array([]),np.array([])
peakHeights = data[peakIndices]
peakBaselines = baselines[peakIndices]
return peakIndices,peakHeights,peakBaselines
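#trigger logic above, in words: a sample is accepted as a pulse peak when the
#baseline-subtracted phase is below threshold, at least nNegDerivChecks-lenience
#of the 10 preceding differences are non-positive, the two following differences
#are positive, and any later candidate within deadtime ticks of an accepted peak
#is discarded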
def oldDetectPulses(sample,threshold,baselines):
filtered = np.array(sample)
#threshold = calcThreshold(filtered[0:2000])
filtered -= baselines
derivative = np.diff(filtered)
peakHeights = []
t = 0
negDeriv = derivative <= 0
posDeriv = np.logical_not(negDeriv)
triggerBooleans = filtered[1:-2] < threshold
peakCondition1 = np.logical_and(negDeriv[0:-2],posDeriv[1:-1])
peakCondition2 = np.logical_and(triggerBooleans,posDeriv[2:])
peakBooleans = np.logical_and(peakCondition1,peakCondition2)
try:
peakIndices = np.where(peakBooleans)[0]+1
i = 0
p = peakIndices[i]
deadtime=10#us
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
except IndexError:
return np.array([]),np.array([]),np.array([])
peakHeights = filtered[peakIndices]
peakBaselines = baselines[peakIndices]
return peakIndices,peakHeights,peakBaselines
rootFolder = '/Scratch/filterData/'
quietFolder = '/Scratch/filterData/20130925/blue/'
sampleRate=1e6 # 1 MHz
roachNum = 0
pixelNum = 51
secs=60
folder = '/Scratch/filterData/20130925/blue/'
cps=700
bFiltered = False
phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,30,0))
label='Blue'
#roachNum = 0
#pixelNum = 51
#secs=60
#folder = '/home/kids/labData/20130925/red/'
#cps=600
#bFiltered = False
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
#quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,30,0))
#label='Red'
#roachNum = 0
#pixelNum = 134
#secs=5
#folder = '/home/kids/labData/20130220/'
#cps=700
#bFiltered = True
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
#roachNum = 4
#pixelNum = 2
#secs=20
#folder = os.path.join(rootFolder,'20121123/')
#bFiltered = False
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs.dat'%(roachNum,pixelNum,secs))
##missing quiet file, so use another
#quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(0,51,30,0))
bPlotPeaks = True
deadtime=10
phaseFile = open(phaseFilename,'r')
quietFile = open(quietFilename,'r')
phase = phaseFile.read()
quietPhase = quietFile.read()
numQDRSamples=2**19
numBytesPerSample=4
nLongsnapSamples = numQDRSamples*2*secs
qdrValues = struct.unpack('>%dh'%(nLongsnapSamples),phase)
qdrPhaseValues = np.array(qdrValues,dtype=np.float32)*360./2**16*4/np.pi #convert from adc units to degrees
nPhaseValues=len(qdrValues)
#nPhaseValues=int(1e5)
print nPhaseValues,'us'
quietQdrValues = struct.unpack('>%dh'%(numQDRSamples*2*30),quietPhase)
quietQdrPhaseValues = np.array(quietQdrValues,dtype=np.float32)*360./2**16*4/np.pi #convert from adc units to degrees
fig = plt.figure()
NAxes = 1
iAxes = 1
size=26
offset = 3
sampleStart = 5000
nSamples = nPhaseValues-sampleStart
thresholdLength = 2000
thresholdSigma = 2.1
sample=qdrValues[sampleStart:sampleStart+nSamples]
quietSample=quietQdrValues[sampleStart:sampleStart+thresholdLength]
#sample = np.array(qdrPhaseValues)
if bFiltered == False:
rawdata = np.array(sample)
quietRawdata = np.array(quietSample)
#filter= np.loadtxt(os.path.join(rootFolder,'fir/template20121207r%d.txt'%roachNum))[pixelNum,:]
#lpf250kHz= np.loadtxt('/Scratch/filterData/fir/lpf_250kHz.txt')
matched30= np.loadtxt(os.path.join(rootFolder,'fir/matched_30us.txt'))
filter=matched30
#data = np.correlate(filter,rawdata,mode='same')[::-1]
data = scipy.signal.lfilter(filter,1,rawdata)
#quietData = np.correlate(filter,quietRawdata,mode='same')[::-1]
quietData = scipy.signal.lfilter(filter,1,quietRawdata)
print 'filtering done'
sys.stdout.flush()
else:
data = np.array(sample)
quietData = np.array(quietSample)
criticalFreq = 200 #Hz
hpSos = IirFilter(sampleFreqHz=sampleRate,criticalFreqHz=criticalFreq,btype='highpass')
f=2*np.sin(np.pi*criticalFreq/sampleRate)
Q=.7
q=1./Q
hpSvf = IirFilter(sampleFreqHz=sampleRate,numCoeffs=np.array([1,-2,1]),denomCoeffs=np.array([1+f**2, f*q-2,1-f*q]))
baselines = data - hpSvf.filterData(data)
print 'baselines done'
threshold = calcThreshold(quietData,Nsigma=thresholdSigma)
print 'threshold done'
sys.stdout.flush()
endIdx = 1000*thresholdLength
if bPlotPeaks:
ax=fig.add_subplot(NAxes,1,iAxes)
ax.plot(rawdata[0:endIdx],'.-',color='gray',label='raw phase')
ax.plot(data[0:endIdx],'k.-',label='optimal filtered phase')
ax.plot(baselines[0:endIdx],'b',label='lpf baseline')
ax.plot(baselines[0:endIdx]+threshold,'y--',label='threshold')
idx,peaks,bases = detectPulses(data,threshold,baselines)
print len(peaks),'peaks detected'
sys.stdout.flush()
#
#
if len(peaks)>0:
if bPlotPeaks:
ax.plot(idx,peaks,'r.',label='detected peak')
ax.plot(idx,bases,'g.',label='detected baseline')
ax.set_xlabel('time (us)')
ax.set_ylabel('phase (${}^{\circ}$)')
#ax.set_xlim([5000,15000])
#ax.set_title('detected peaks and baseline for ~%d cps, pixel /r%d/p%d'%(cps,roachNum,pixelNum))
ax.legend(loc='lower right')
iAxes+=1
np.savez('/Scratch/dataProcessing/filterTests/sdetected%d%s_dead%d.npz'%(cps,label,deadtime),idx=idx,peaks=peaks,bases=bases,baselines=baselines,threshold=threshold,qdrValues=qdrValues,data=rawdata,filtered=data)
print 'done'
sys.stdout.flush()
plt.show()
|
gpl-2.0
|
beni55/sympy
|
sympy/plotting/tests/test_plot_implicit.py
|
17
|
2600
|
import warnings
from sympy import (plot_implicit, cos, Symbol, Eq, sin, re, And, Or, exp, I,
tan, pi)
from sympy.plotting.plot import unset_show
from tempfile import NamedTemporaryFile
from sympy.utilities.pytest import skip
from sympy.external import import_module
#Set plots not to show
unset_show()
def tmp_file(name=''):
return NamedTemporaryFile(suffix='.png').name
def plot_and_save(name):
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
#implicit plot tests
plot_implicit(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2)).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), (x, -5, 5),
(y, -4, 4)).save(tmp_file(name))
plot_implicit(y > 1 / x, (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y < 1 / tan(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y >= 2 * sin(x) * cos(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y <= x**2, (x, -3, 3),
(y, -1, 5)).save(tmp_file(name))
#Test all input args for plot_implicit
plot_implicit(Eq(y**2, x**3 - x)).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False, points=500).save(tmp_file())
plot_implicit(y > x, (x, -5, 5)).save(tmp_file())
plot_implicit(And(y > exp(x), y > x + 2)).save(tmp_file())
plot_implicit(Or(y > x, y > -x)).save(tmp_file())
plot_implicit(x**2 - 1, (x, -5, 5)).save(tmp_file())
plot_implicit(x**2 - 1).save(tmp_file())
plot_implicit(y > x, depth=-5).save(tmp_file())
plot_implicit(y > x, depth=5).save(tmp_file())
plot_implicit(y > cos(x), adaptive=False).save(tmp_file())
plot_implicit(y < cos(x), adaptive=False).save(tmp_file())
plot_implicit(And(y > cos(x), Or(y > x, Eq(y, x)))).save(tmp_file())
plot_implicit(y - cos(pi / x)).save(tmp_file())
#Test plots which cannot be rendered using the adaptive algorithm
#TODO: catch the warning.
plot_implicit(Eq(y, re(cos(x) + I*sin(x)))).save(tmp_file(name))
with warnings.catch_warnings(record=True) as w:
plot_implicit(x**2 - 1, legend='An implicit plot').save(tmp_file())
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert 'No labeled objects found' in str(w[0].message)
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
plot_and_save('test')
else:
skip("Matplotlib not the default backend")
|
bsd-3-clause
|
jch1/models
|
cognitive_mapping_and_planning/tfcode/nav_utils.py
|
14
|
18657
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various losses for training navigation agents.
Defines various loss functions for navigation agents,
compute_losses_multi_or.
"""
import os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.nets import resnet_v2
from tensorflow.python.training import moving_averages
import logging
from src import utils
import src.file_utils as fu
from tfcode import tf_utils
def compute_losses_multi_or(logits, actions_one_hot, weights=None,
num_actions=-1, data_loss_wt=1., reg_loss_wt=1.,
ewma_decay=0.99, reg_loss_op=None):
assert(num_actions > 0), 'num_actions must be specified and must be > 0.'
with tf.name_scope('loss'):
if weights is None:
      weights = tf.ones_like(actions_one_hot, dtype=tf.float32, name='weight')
actions_one_hot = tf.cast(tf.reshape(actions_one_hot, [-1, num_actions],
're_actions_one_hot'), tf.float32)
weights = tf.reduce_sum(tf.reshape(weights, [-1, num_actions], 're_weight'),
reduction_indices=1)
total = tf.reduce_sum(weights)
action_prob = tf.nn.softmax(logits)
action_prob = tf.reduce_sum(tf.multiply(action_prob, actions_one_hot),
reduction_indices=1)
example_loss = -tf.log(tf.maximum(tf.constant(1e-4), action_prob))
data_loss_op = tf.reduce_sum(example_loss * weights) / total
if reg_loss_op is None:
if reg_loss_wt > 0:
reg_loss_op = tf.add_n(tf.losses.get_regularization_losses())
else:
reg_loss_op = tf.constant(0.)
if reg_loss_wt > 0:
total_loss_op = data_loss_wt*data_loss_op + reg_loss_wt*reg_loss_op
else:
total_loss_op = data_loss_wt*data_loss_op
is_correct = tf.cast(tf.greater(action_prob, 0.5, name='pred_class'), tf.float32)
acc_op = tf.reduce_sum(is_correct*weights) / total
ewma_acc_op = moving_averages.weighted_moving_average(
acc_op, ewma_decay, weight=total, name='ewma_acc')
acc_ops = [ewma_acc_op]
return reg_loss_op, data_loss_op, total_loss_op, acc_ops
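# Worked example for the data loss above: if the policy puts probability 0.25 on
# the action actually taken, that step contributes -log(0.25) ~= 1.386, and the
# 1e-4 floor caps any single step at -log(1e-4) ~= 9.21 before the weighted mean.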
def get_repr_from_image(images_reshaped, modalities, data_augment, encoder,
freeze_conv, wt_decay, is_training):
# Pass image through lots of convolutional layers, to obtain pool5
if modalities == ['rgb']:
with tf.name_scope('pre_rgb'):
x = (images_reshaped + 128.) / 255. # Convert to brightness between 0 and 1.
if data_augment.relight and is_training:
x = tf_utils.distort_image(x, fast_mode=data_augment.relight_fast)
x = (x-0.5)*2.0
scope_name = encoder
elif modalities == ['depth']:
with tf.name_scope('pre_d'):
d_image = images_reshaped
x = 2*(d_image[...,0] - 80.0)/100.0
y = d_image[...,1]
d_image = tf.concat([tf.expand_dims(x, -1), tf.expand_dims(y, -1)], 3)
x = d_image
scope_name = 'd_'+encoder
resnet_is_training = is_training and (not freeze_conv)
with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(resnet_is_training)):
fn = getattr(tf_utils, encoder)
x, end_points = fn(x, num_classes=None, global_pool=False,
output_stride=None, reuse=None,
scope=scope_name)
vars_ = slim.get_variables_to_restore()
conv_feat = x
return conv_feat, vars_
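# Preprocessing recap for the encoder above: RGB inputs are mapped from roughly
# [-128, 127] to [-1, 1]; for depth, channel 0 is re-centred at 80 and scaled by
# 1/50 (2*(d - 80)/100) while channel 1 is passed through unchanged.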
def default_train_step_kwargs(m, obj, logdir, rng_seed, is_chief, num_steps,
iters, train_display_interval,
dagger_sample_bn_false):
train_step_kwargs = {}
train_step_kwargs['obj'] = obj
train_step_kwargs['m'] = m
# rng_data has 2 independent rngs, one for sampling episodes and one for
  # sampling perturbs (so that we can make results reproducible).
train_step_kwargs['rng_data'] = [np.random.RandomState(rng_seed),
np.random.RandomState(rng_seed)]
train_step_kwargs['rng_action'] = np.random.RandomState(rng_seed)
if is_chief:
train_step_kwargs['writer'] = tf.summary.FileWriter(logdir) #, m.tf_graph)
else:
train_step_kwargs['writer'] = None
train_step_kwargs['iters'] = iters
train_step_kwargs['train_display_interval'] = train_display_interval
train_step_kwargs['num_steps'] = num_steps
train_step_kwargs['logdir'] = logdir
train_step_kwargs['dagger_sample_bn_false'] = dagger_sample_bn_false
return train_step_kwargs
# Utilities for visualizing and analysing validation output.
def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
"""Save distance to goal at all time steps.
Args:
outputs : [gt_dist_to_goal].
global_step : number of iterations.
output_dir : output directory.
metric_summary : to append scalars to summary.
N : number of outputs to process.
"""
d_at_t = np.concatenate(map(lambda x: x[0][:,:,0]*1, outputs), axis=0)
fig, axes = utils.subplot(plt, (1,1), (5,5))
axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
axes.set_xlabel('time step')
axes.set_ylabel('dist to next goal')
axes.grid('on')
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
plt.close(fig)
return None
def save_all(outputs, global_step, output_dir, metric_summary, N):
"""Save numerous statistics.
Args:
outputs : [locs, goal_loc, gt_dist_to_goal, node_ids, perturbs]
global_step : number of iterations.
output_dir : output directory.
metric_summary : to append scalars to summary.
N : number of outputs to process.
"""
all_locs = np.concatenate(map(lambda x: x[0], outputs), axis=0)
all_goal_locs = np.concatenate(map(lambda x: x[1], outputs), axis=0)
all_d_at_t = np.concatenate(map(lambda x: x[2][:,:,0]*1, outputs), axis=0)
all_node_ids = np.concatenate(map(lambda x: x[3], outputs), axis=0)
all_perturbs = np.concatenate(map(lambda x: x[4], outputs), axis=0)
file_name = os.path.join(output_dir, 'all_locs_at_t_{:d}.pkl'.format(global_step))
vars = [all_locs, all_goal_locs, all_d_at_t, all_node_ids, all_perturbs]
var_names = ['all_locs', 'all_goal_locs', 'all_d_at_t', 'all_node_ids', 'all_perturbs']
utils.save_variables(file_name, vars, var_names, overwrite=True)
return None
def eval_ap(outputs, global_step, output_dir, metric_summary, N, num_classes=4):
"""Processes the collected outputs to compute AP for action prediction.
Args:
outputs : [logits, labels]
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
num_classes : number of classes to compute AP over, and to reshape tensors.
"""
if N >= 0:
outputs = outputs[:N]
logits = np.concatenate(map(lambda x: x[0], outputs), axis=0).reshape((-1, num_classes))
labels = np.concatenate(map(lambda x: x[1], outputs), axis=0).reshape((-1, num_classes))
aps = []
for i in range(logits.shape[1]):
ap, rec, prec = utils.calc_pr(labels[:,i], logits[:,i])
ap = ap[0]
tf_utils.add_value_to_summary(metric_summary, 'aps/ap_{:d}: '.format(i), ap)
aps.append(ap)
return aps
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
"""Processes the collected outputs during validation to
1. Plot the distance over time curve.
2. Compute mean and median distances.
3. Plots histogram of end distances.
Args:
outputs : [locs, goal_loc, gt_dist_to_goal].
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
"""
SUCCESS_THRESH = 3
if N >= 0:
outputs = outputs[:N]
# Plot distance at time t.
d_at_t = []
for i in range(len(outputs)):
locs, goal_loc, gt_dist_to_goal = outputs[i]
d_at_t.append(gt_dist_to_goal[:,:,0]*1)
# Plot the distance.
fig, axes = utils.subplot(plt, (1,1), (5,5))
d_at_t = np.concatenate(d_at_t, axis=0)
axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
axes.set_xlabel('time step')
axes.set_ylabel('dist to next goal')
axes.grid('on')
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
plt.close(fig)
# Plot the trajectories and the init_distance and final distance.
d_inits = []
d_ends = []
for i in range(len(outputs)):
locs, goal_loc, gt_dist_to_goal = outputs[i]
d_inits.append(gt_dist_to_goal[:,0,0]*1)
d_ends.append(gt_dist_to_goal[:,-1,0]*1)
# Plot the distance.
fig, axes = utils.subplot(plt, (1,1), (5,5))
d_inits = np.concatenate(d_inits, axis=0)
d_ends = np.concatenate(d_ends, axis=0)
axes.plot(d_inits+np.random.rand(*(d_inits.shape))-0.5,
d_ends+np.random.rand(*(d_ends.shape))-0.5, '.', mec='red', mew=1.0)
axes.set_xlabel('init dist'); axes.set_ylabel('final dist');
axes.grid('on'); axes.axis('equal');
title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
title_str = title_str.format(
np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
100*(np.mean(d_ends <= SUCCESS_THRESH)))
axes.set_title(title_str)
file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
overwrite=True)
plt.close(fig)
# Plot the histogram of the end_distance.
with plt.style.context('seaborn-white'):
d_ends_ = np.sort(d_ends)
d_inits_ = np.sort(d_inits)
leg = [];
fig, ax = utils.subplot(plt, (1,1), (5,5))
ax.grid('on')
ax.set_xlabel('Distance from goal'); ax.xaxis.label.set_fontsize(16);
ax.set_ylabel('Fraction of data'); ax.yaxis.label.set_fontsize(16);
ax.plot(d_ends_, np.arange(d_ends_.size)*1./d_ends_.size, 'r')
ax.plot(d_inits_, np.arange(d_inits_.size)*1./d_inits_.size, 'k')
leg.append('Final'); leg.append('Init');
ax.legend(leg, fontsize='x-large');
ax.set_axis_on()
title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
title_str = title_str.format(
np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
100*(np.mean(d_ends <= SUCCESS_THRESH)))
ax.set_title(title_str)
file_name = os.path.join(output_dir, 'dist_hist_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
# Log distance metrics.
tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
100*(np.mean(d_inits <= SUCCESS_THRESH)))
tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
100*(np.mean(d_ends <= SUCCESS_THRESH)))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
np.percentile(d_inits, q=75))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
np.percentile(d_ends, q=75))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
np.median(d_inits))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
np.median(d_ends))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
np.mean(d_inits))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
np.mean(d_ends))
return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \
np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \
      100*(np.mean(d_inits <= SUCCESS_THRESH)), 100*(np.mean(d_ends <= SUCCESS_THRESH))
def plot_trajectories(outputs, global_step, output_dir, metric_summary, N):
"""Processes the collected outputs during validation to plot the trajectories
in the top view.
Args:
outputs : [locs, orig_maps, goal_loc].
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
"""
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('gray')
fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
locs, orig_maps, goal_loc = outputs[i]
is_semantic = np.isnan(goal_loc[0,0,1])
for j in range(orig_maps.shape[0]):
ax = axes.pop();
ax.plot(locs[j,0,0], locs[j,0,1], 'ys')
# Plot one by one, so that they come in different colors.
for k in range(goal_loc.shape[1]):
if not is_semantic:
ax.plot(goal_loc[j,k,0], goal_loc[j,k,1], 's')
if False:
ax.plot(locs[j,:,0], locs[j,:,1], 'r.', ms=3)
ax.imshow(orig_maps[j,0,:,:,0], origin='lower')
ax.set_axis_off();
else:
ax.scatter(locs[j,:,0], locs[j,:,1], c=np.arange(locs.shape[1]),
cmap='jet', s=10, lw=0)
ax.imshow(orig_maps[j,0,:,:,0], origin='lower', vmin=-1.0, vmax=2.0)
if not is_semantic:
xymin = np.minimum(np.min(goal_loc[j,:,:], axis=0), np.min(locs[j,:,:], axis=0))
xymax = np.maximum(np.max(goal_loc[j,:,:], axis=0), np.max(locs[j,:,:], axis=0))
else:
xymin = np.min(locs[j,:,:], axis=0)
xymax = np.max(locs[j,:,:], axis=0)
xy1 = (xymax+xymin)/2. - np.maximum(np.max(xymax-xymin), 12)
xy2 = (xymax+xymin)/2. + np.maximum(np.max(xymax-xymin), 12)
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
ax.set_axis_off()
file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
return None
def add_default_summaries(mode, arop_full_summary_iters, summarize_ops,
summarize_names, to_aggregate, action_prob_op,
input_tensors, scope_name):
  assert(mode == 'train' or mode == 'val' or mode == 'test'), \
      'add_default_summaries mode is not one of train, val, or test.'
s_ops = tf_utils.get_default_summary_ops()
if mode == 'train':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
summarize_ops, summarize_names, mode, to_aggregate=False,
scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
elif mode == 'val':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
summarize_ops, summarize_names, mode, to_aggregate=to_aggregate,
scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
elif mode == 'test':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
[], [], mode, to_aggregate=[], scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
if mode == 'val':
arop = s_ops.additional_return_ops
arop += [[action_prob_op, input_tensors['train']['action']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['orig_maps'],
input_tensors['common']['goal_loc']]]
s_ops.arop_summary_iters += [-1, arop_full_summary_iters,
arop_full_summary_iters]
s_ops.arop_eval_fns += [eval_ap, eval_dist, plot_trajectories]
elif mode == 'test':
arop = s_ops.additional_return_ops
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal'],
input_tensors['step']['node_ids'],
input_tensors['step']['perturbs']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['orig_maps'],
input_tensors['common']['goal_loc']]]
s_ops.arop_summary_iters += [-1, -1, -1, arop_full_summary_iters]
s_ops.arop_eval_fns += [eval_dist, save_d_at_t, save_all,
plot_trajectories]
return s_ops
|
apache-2.0
|
magic2du/contact_matrix
|
Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_10_15_2014_server.py
|
1
|
38119
|
# coding: utf-8
# In[3]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
# In[4]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
filename = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
# In[5]:
ddis
# In[28]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
""" get total number of sequences in a ddi familgy
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename)
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename)
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
fisher_mode: subset base on this Fisher of AAONLY...
Returns:
selected X, y
"""
        y = data[:,-1] # get label
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
            raise ValueError('there is an error in fisher_mode: %s' % fisher_mode)
return X, y
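# --- Illustrative sketch (not part of the original script) --------------------
# A minimal example of how DDI_family_base is meant to be used, assuming the
# Vectors_Fishers_aaIndex_raw folder layout described above and a hypothetical
# family name 'ddi_example'. It is wrapped in a function that is never called,
# so it does not change the behaviour of this script.
def _ddi_family_usage_sketch():
    family = DDI_family_base('ddi_example')  # hypothetical DDI family name
    raw = family.raw_data[1]                 # raw examples for sequence 1
    balanced = family.equal_size_data[1]     # balanced positive/negative subset
    # Select the Fisher-score-only feature columns and the labels.
    X, y = family.select_X_y(balanced, fisher_mode='FisherM1ONLY')
    print(raw.shape)
    print(X.shape)
    print(y.shape)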
# In[28]:
# In[29]:
import sklearn.preprocessing
class Precessing_Scaler_0_9(sklearn.preprocessing.StandardScaler):
def __init__(self):
        super(Precessing_Scaler_0_9, self).__init__(with_std=0.333)
def transform(self, X): # transform data to 0.1 to 0.9
new_X = super(Precessing_Scaler_0_9, self).transform(X)
print
new_X[new_X > 1] = 1
new_X[new_X < -1] = -1
new_X = (new_X + 1) * 0.4 + 0.1
return new_X
def fit_transform(self):
print 'Did not implement'
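# --- Illustrative sketch (not part of the original script) --------------------
# Precessing_Scaler_0_9 standardizes the data, clips it to [-1, 1] and then maps
# it linearly into [0.1, 0.9], which suits the sigmoid units used by the SdA
# below. Since fit_transform above is a stub, the intended pattern is fit()
# followed by transform(). This helper is never called.
def _scaler_0_9_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    scaler = Precessing_Scaler_0_9()
    scaler.fit(X)
    X_scaled = scaler.transform(X)
    print('min %f max %f' % (X_scaled.min(), X_scaled.max()))  # within [0.1, 0.9]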
def performance_score(target_label, predicted_label, predicted_score = False, print_report = True):
""" get performance matrix for prediction
Attributes:
target_label: int 0, 1
predicted_label: 0, 1 or ranking
        predicted_score: bool. If False, predicted_label holds 0/1 labels; if True, it holds ranking scores and the AUC score is also computed.
        print_report: if True, print the performance on screen
"""
import sklearn
from sklearn.metrics import roc_auc_score
score = {}
if predicted_score == False:
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if predicted_score == True:
auc_score = roc_auc_score(target_label, predicted_label)
score['auc_score'] = auc_score
target_label = [x >= 0.5 for x in target_label]
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if print_report == True:
for key, value in score.iteritems():
print key, '{percent:.1%}'.format(percent=value)
return score
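# --- Illustrative sketch (not part of the original script) --------------------
# performance_score can be called either with hard 0/1 predictions
# (predicted_score=False) or with ranking scores (predicted_score=True, which
# additionally reports the AUC). A small, never-called example on toy labels:
def _performance_score_sketch():
    target = [0, 0, 1, 1]
    hard_predictions = [0, 1, 1, 1]
    scores = performance_score(target, hard_predictions, predicted_score=False,
                               print_report=False)
    print('accuracy=%(accuracy)s precision=%(precision)s recall=%(recall)s' % scores)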
def saveAsCsv(predicted_score, fname, score_dict, *arguments): #new
newfile = False
if os.path.isfile(fname + '_report.csv'):
pass
else:
newfile = True
csvfile = open(fname + '_report.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if predicted_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerows(arg)
csvfile.close()
def LOO_out_performance_for_all(ddis):
for ddi in ddis:
try:
one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
one_ddi_family.get_LOO_perfermance('FisherM1', '')
except Exception,e:
print str(e)
logger.info("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_LOO_perfermance(self, fisher_mode, settings = None):
analysis_scr = []
predicted_score = False
reduce_ratio = 1
for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
print seq_no
logger.info('sequence number: ' + str(seq_no))
if 1:
print "SVM"
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# Deep learning part
min_max_scaler = Precessing_Scaler_0_9()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = 1
batch_size = 100
pretraining_epochs = cal_epochs(1500, x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = 0.001
training_epochs = 1500
hidden_layers_sizes= [100, 100]
corruption_levels = [0,0]
if 1:
print "direct deep learning"
# direct deep learning
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
if 0:
# deep learning using split network
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs=1500
hidden_layers_sizes =[50, 50]
corruption_levels=[0, 0]
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = 1
batch_size = 100
pretraining_epochs = cal_epochs(1500, x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = 0.001
training_epochs = 1500
hidden_layers_sizes= [100, 100]
corruption_levels = [0,0]
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[29]:
# In[30]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance('FisherM1', '')
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_ten_fold_crossvalid_perfermance(self, fisher_mode, settings = None):
analysis_scr = []
predicted_score = False
reduce_ratio = 1
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10)
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
if 1:
print "SVM"
#start_index = int((subset_no - 1) * subset_size + 1)
#if subset_no == 10:
# end_index = int(max(start_index + subset_size, self.ddi_obj.total_number_of_sequences))
#else:
# end_index = int(start_index + subset_size)
#print start_index, end_index
#(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(start_index, end_index, reduce_ratio = reduce_ratio)
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
min_max_scaler = Precessing_Scaler_0_9()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = 1
batch_size = 100
pretraining_epochs = cal_epochs(5000, x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = 0.001
training_epochs = 1500
hidden_layers_sizes= [100, 100]
corruption_levels = [0.1, 0.1]
if 1:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs = cal_epochs(5000, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
if 0:
# deep learning using split network
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs=5000
hidden_layers_sizes =[100, 100, 100]
corruption_levels=[0, 0, 0]
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = 1
batch_size = 100
pretraining_epochs = cal_epochs(5000, x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = 0.001
training_epochs = 1500
hidden_layers_sizes= [100, 100, 100]
corruption_levels = [0,0,0]
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[ ]:
ten_fold_crossvalid_performance_for_all(ddis)
# In[ ]:
#LOO_out_performance_for_all(ddis)
# In[25]:
for handler in list(logger.handlers):
    logger.removeHandler(handler)
    handler.flush()
    handler.close()
# In[ ]:
|
gpl-2.0
|
lazywei/scikit-learn
|
examples/plot_kernel_approximation.py
|
262
|
8004
|
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it does not lie on the plane
spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along the first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
flailingsquirrel/cmake_scipy_ctypes_example
|
src/python/plotting_utils.py
|
2
|
1927
|
import matplotlib
################################################################################
# Utility functions intended for use by other functions and classes in this module
################################################################################
def get_plottable( input_axis, object_type ):
'''Get the children of the input axis which are the object type specified'''
children = input_axis.get_children()
objects = [obj for obj in children if type(obj) == object_type]
return objects
# end get_plottable
def get_plottables( input_axis, object_types ):
'''Get the children of the input axis which are any of the object types specified'''
objects = []
for obj_type in object_types:
more_objects = get_plottable( input_axis, obj_type )
objects.extend( more_objects )
return objects
# end get_plottables
def get_lines( input_axis ):
'''Get the children of the input axis which are line objects'''
children = input_axis.get_children()
lines = [l for l in children if type(l) == matplotlib.lines.Line2D]
return lines
def get_color_rgb( artist ):
'''
Attempts to get the color of the input artist as an RGB vector
Colors are accessed differently depending on whether the artist
is a line, collection, etc. Also, there may be more than one
color (e.g., edge color versus face color), and sometimes the
color is stored as a string (like this: 'b') instead of a vector
'''
artist_type = type(artist)
if artist_type == matplotlib.lines.Line2D:
color = artist.get_color()
elif artist_type == matplotlib.collections.PathCollection:
color = artist.get_facecolor()[0]
else:
print 'Could not get color RGB--assuming blue for no good reason'
color = [0, 0, 1]
color_rgb = matplotlib.colors.colorConverter.to_rgb( color )
return color_rgb
# end get_color_rgb
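# --- Illustrative sketch (not part of the original module) --------------------
# A minimal, hypothetical usage of the helpers above: draw a line and a scatter
# on one axis, then recover the plotted artists and their colors. Never called.
def _plotting_utils_sketch():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], color='r')
    ax.scatter([0, 1, 2], [4, 1, 0])
    artists = get_plottables(ax, [matplotlib.lines.Line2D,
                                  matplotlib.collections.PathCollection])
    for artist in artists:
        print(get_color_rgb(artist))
    plt.close(fig)
# end _plotting_utils_sketch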
|
bsd-3-clause
|
justinfinkle/pydiffexp
|
scripts/unittest_like_scripts/voom_testing.py
|
1
|
2181
|
import sys
import matplotlib.pyplot as plt  # used below for labels and layout
import pandas as pd
import seaborn as sns  # used below for clustermap/swarmplot
from pydiffexp import DEAnalysis, DEPlot
# `utils` and `rh` are referenced below but were not imported in the original script.
pd.set_option('display.width', 1000)
# Load the data
test_path = "/Users/jfinkle/Documents/Northwestern/MoDyLS/Code/Python/sprouty/data/raw_data/GSE63497_Oncogene_Formatted.tsv"
# test_path = "/Users/jfinkle/Documents/Northwestern/MoDyLS/Code/Python/sprouty/data/raw_data/GSE63497_VEC_CRE_Formatted.tsv"
raw_data = pd.read_csv(test_path, sep='\t', index_col=0)
hierarchy = ['condition', 'replicate']
# The example data has been background corrected, so set everything below 0 to a trivial positive value of 1
raw_data[raw_data <= 0] = 1
# Remove all genes with low counts so voom isn't confused
raw_data = raw_data[~(raw_data < 5).all(axis=1)]
# Make the Differential Expression Analysis Object
# The reference labels specify how samples will be organized into unique values
dea = DEAnalysis(raw_data, index_names=hierarchy, reference_labels=['condition'], time=None, counts=True)
# Data can be standarized if desired
# norm_data = dea.standardize()
# Fit the contrasts and save the object
# cont = dea.possible_contrasts()
# cont[0] = 'CRE-BRaf'
dea.fit_contrasts()
dep = DEPlot(dea)
sys.exit()
# Volcano Plot
x = dea.results[0].top_table(p=0.05)
# sns.clustermap(x.iloc[:, :10])
genes = utils.grepl('SOX', x.index)
g = sns.clustermap(x.loc[genes].iloc[:, :10])
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=30)
plt.show()
sys.exit()
gene = 'SPRY4'
print(rh.rvect_to_py(dea.data_matrix).loc[gene].reset_index())
print(dea.data.loc[gene])
# ax = sns.boxplot(data=rh.rvect_to_py(dea.data_matrix).loc[gene].reset_index(), x='index', y=gene)
ax = sns.swarmplot(data=rh.rvect_to_py(dea.data_matrix).loc[gene].reset_index(), x='index', y=gene, size=10)
plt.xlabel('Condition', fontsize=20, fontweight='bold')
plt.ylabel(('%s Estimated log2 CPM' % gene), fontsize=20, fontweight='bold')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.tight_layout()
plt.show()
sys.exit()
dep.volcano_plot(x, top_n=5, show_labels=True, top_by=['-log10p', 'logFC'])
plt.tight_layout()
plt.show()
# dea.to_pickle("./sprouty_pickle.pkl")
|
gpl-3.0
|
reuk/wayverb
|
scripts/python/iterative_tetrahedral.py
|
2
|
4819
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import sqrt
import operator
BASIC_CUBE = [(0, 0, 0), # 0
(0.5, 0, 0.5), # 1
(0.25, 0.25, 0.25), # 2
(0.75, 0.25, 0.75), # 3
(0, 0.5, 0.5), # 4
(0.5, 0.5, 0), # 5
(0.25, 0.75, 0.75), # 6
(0.75, 0.75, 0.25)] # 7
def get_neighbor_offset_table():
ret = [[((0, 0, 0), 2), ((-1, 0, -1), 3), ((-1, -1, 0), 6), ((0, -1, -1), 7)],
[((0, 0, 0), 2), ((0, 0, 0), 3), ((0, -1, 0), 6), ((0, -1, 0), 7)],
[((0, 0, 0), 0), ((0, 0, 0), 1), ((0, 0, 0), 4), ((0, 0, 0), 5)],
[((1, 0, 1), 0), ((0, 0, 0), 1), ((0, 0, 1), 4), ((1, 0, 0), 5)],
[((0, 0, 0), 2), ((0, 0, -1), 3), ((0, 0, 0), 6), ((0, 0, -1), 7)],
[((0, 0, 0), 2), ((-1, 0, 0), 3), ((-1, 0, 0), 6), ((0, 0, 0), 7)],
[((1, 1, 0), 0), ((0, 1, 0), 1), ((0, 0, 0), 4), ((1, 0, 0), 5)],
[((0, 1, 1), 0), ((0, 1, 0), 1), ((0, 0, 1), 4), ((0, 0, 0), 5)]]
return map(lambda j: map(lambda i: Locator(*i), j), ret)
def mul((x, y, z), d):
return (x * d, y * d, z * d)
def add(a, b):
return map(lambda (x, y): x + y, zip(a, b))
def node_cube(spacing):
return map(lambda i: mul(i, spacing), BASIC_CUBE)
def get_mesh((x, y, z), spacing):
c = []
for i in range(x):
xo = i * spacing
for j in range(y):
yo = j * spacing
for k in range(z):
zo = k * spacing
nodes = node_cube(spacing)
nodes = map(lambda i: add(i, (xo, yo, zo)), nodes)
c += nodes
return c
class Locator:
def __init__(self, pos, mod_ind):
self.pos = pos
self.mod_ind = mod_ind
class WaveguideMesh:
def __init__(self, dim, spacing):
self.mesh = get_mesh(dim, spacing)
self.dim = dim
self.offsets = get_neighbor_offset_table()
def get_index(self, locator):
i, j, k = self.dim
x, y, z = locator.pos
l = len(BASIC_CUBE)
return locator.mod_ind + x * l + y * i * l + z * i * j * l
def get_locator(self, index):
i, j, k = self.dim
mod_ind = index % len(BASIC_CUBE)
index -= mod_ind
index /= len(BASIC_CUBE)
x = index % i
index -= x
index /= i
y = index % j
index -= y
index /= j
z = index % k
index -= z
index /= k
return Locator((x, y, z), mod_ind)
def locator_filter(self, c, relative):
x, y, z = self.dim
rlx, rly, rlz = add(c.pos, relative.pos)
return 0 <= rlx < x and 0 <= rly < y and 0 <= rlz < z
def get_absolute_neighbors(self, index):
locator = self.get_locator(index)
x, y, z = locator.pos
mod_ind = locator.mod_ind
relative = self.offsets[mod_ind]
ret = []
for i in relative:
summed = add(locator.pos, i.pos)
sx, sy, sz = summed
is_neighbor = (0 <= summed[0] < self.dim[0] and
0 <= summed[1] < self.dim[1] and
0 <= summed[2] < self.dim[2])
ind = self.get_index(Locator(summed, i.mod_ind)) if is_neighbor else -1;
ret.append(ind)
return ret
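# --- Illustrative sketch (not part of the original script) --------------------
# get_index and get_locator are inverses of one another: a node's flat index can
# be converted to a (cube position, index within cube) Locator and back. A small,
# never-called check of that round trip on a 2x2x2 mesh:
def _index_locator_roundtrip_sketch():
    waveguide = WaveguideMesh((2, 2, 2), 1)
    for index in range(len(waveguide.mesh)):
        locator = waveguide.get_locator(index)
        assert waveguide.get_index(locator) == index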
def concat(l):
return reduce(operator.add, l)
def main():
waveguide = WaveguideMesh((2, 2, 2), 1)
x, y, z = map(lambda i: np.array(i), zip(*waveguide.mesh))
max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]).max() / 2.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
fig = plt.figure()
for plot in range(8):
ax = fig.add_subplot(331 + plot, projection='3d', aspect='equal')
pos = waveguide.get_index(Locator((0, 0, 0), plot))
n = waveguide.get_absolute_neighbors(pos)
n = filter(lambda i: i >= 0, n)
p = []
p += [waveguide.mesh[i] for i in n]
p += [waveguide.mesh[pos]]
print plot, p
ax.scatter(*zip(*p))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax = fig.add_subplot(339, projection='3d', aspect='equal')
ax.scatter(*zip(*waveguide.mesh))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
if __name__ == "__main__":
main()
|
gpl-2.0
|
aitoralmeida/networkx
|
examples/drawing/knuth_miles.py
|
36
|
2994
|
#!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
    # make new graph of cities, edge if less than 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
|
bsd-3-clause
|
ilo10/scikit-learn
|
sklearn/neighbors/classification.py
|
106
|
13987
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
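# Minimal sketch of the vote-normalization step used in predict_proba above,
# with made-up neighbor weights (hypothetical data, independent of any fitted
# model): each row of `votes` holds the accumulated per-class weights for one
# query point, and dividing by the row sum turns them into probabilities.
def _normalize_votes_sketch():
    votes = np.array([[2.0, 1.0],    # query 0: class 0 weight 2, class 1 weight 1
                      [0.0, 0.0]])   # query 1: no votes (degenerate case)
    normalizer = votes.sum(axis=1)[:, np.newaxis]
    normalizer[normalizer == 0.0] = 1.0  # avoid division by zero, as above
    return votes / normalizer            # -> [[0.667, 0.333], [0., 0.]]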
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
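# Minimal usage sketch for the outlier handling documented above: with a small
# radius the query point at 10.0 has no neighbors, so it receives the fallback
# label -1 instead of raising ValueError. The data is made up for illustration.
def _radius_outlier_sketch():
    X = [[0.0], [1.0], [2.0], [3.0]]
    y = [0, 0, 1, 1]
    clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1)
    clf.fit(X, y)
    return clf.predict([[1.1], [10.0]])  # -> array([ 0, -1])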
|
bsd-3-clause
|
mapleyustat/tns
|
calcs/peps2d_square_ising_m_T_N_thdyn/plot.py
|
1
|
1825
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 6 14:41:31 2014
@author: michael
"""
import numpy as np
import sys
import matplotlib.pyplot as plt
Nv = int(sys.argv[1])
H = float(sys.argv[2])
NT = int(sys.argv[3])
NNh = int(sys.argv[4])
T = np.ndarray(NT)
Nh = np.ndarray(NNh)
x = np.ndarray((NNh, NT))
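# Layout assumed for the .dat files read below (inferred from the parsing
# loops): one line per (Nh, T) pair, "Nh T value" separated by single spaces,
# with the NT temperature points of a given Nh stored consecutively.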
with open("F_T_N.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
if i < NT:
T[i] = float(fields[1])
if i % NT == 0:
            Nh[i // NT] = float(fields[0])
        x[i // NT, i % NT] = float(fields[2])
i += 1
for i in range(NNh):
plt.plot(T, x[i], marker="x", label="$N_h = " + str(Nh[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $H = " + str(H) + "$")
plt.legend(loc=2)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$F$ $[J]$")
plt.savefig("F_T_N.png")
with open("m_T_N.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
        x[i // NT, i % NT] = float(fields[2])
i += 1
plt.clf()
for i in range(NNh):
plt.plot(T, x[i], marker="x", label="$N_h = " + str(Nh[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $H = " + str(H) + "$")
plt.legend(loc=1)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$\\frac{M}{N} = \\frac{\partial F}{\partial H} + 1$")
plt.ylim(0, 1)
plt.savefig("m_T_N.png")
with open("chi_T_N.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
        x[i // NT, i % NT] = float(fields[2])
i += 1
plt.clf()
for i in range(NNh):
plt.plot(T, x[i], marker="x", label="$N_h = " + str(Nh[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $H = " + str(H) + "$")
plt.legend(loc=1)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$\\chi = \\frac{\partial m}{\partial H} = \\frac{\partial^2 F}{\partial H^2}$ $[1/J]$")
plt.ylim(ymin=0)
plt.savefig("chi_T_N.png")
|
gpl-2.0
|
rs2/pandas
|
pandas/tests/generic/test_label_or_level_utils.py
|
9
|
9968
|
import pytest
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
# Fixtures
# ========
@pytest.fixture
def df():
"""DataFrame with columns 'L1', 'L2', and 'L3' """
return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]})
@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]])
def df_levels(request, df):
"""DataFrame with columns or index levels 'L1', 'L2', and 'L3' """
levels = request.param
if levels:
df = df.set_index(levels)
return df
@pytest.fixture
def df_ambig(df):
"""DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3' """
df = df.set_index(["L1", "L2"])
df["L1"] = df["L3"]
return df
@pytest.fixture
def df_duplabels(df):
"""DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2' """
df = df.set_index(["L1"])
df = pd.concat([df, df["L2"]], axis=1)
return df
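# For reference, the df_ambig fixture above evaluates to (values taken from
# the df fixture):
#
#        L3 L1
# L1 L2
# 1  11   A  A
# 2  12   B  B
# 3  13   C  C
#
# so 'L1' is simultaneously an index level and a column label, while
# df_duplabels carries the duplicated column label 'L2' next to 'L3'.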
# Test is label/level reference
# =============================
def get_labels_levels(df_levels):
expected_labels = list(df_levels.columns)
expected_levels = [name for name in df_levels.index.names if name is not None]
return expected_labels, expected_levels
def assert_label_reference(frame, labels, axis):
for label in labels:
assert frame._is_label_reference(label, axis=axis)
assert not frame._is_level_reference(label, axis=axis)
assert frame._is_label_or_level_reference(label, axis=axis)
def assert_level_reference(frame, levels, axis):
for level in levels:
assert frame._is_level_reference(level, axis=axis)
assert not frame._is_label_reference(level, axis=axis)
assert frame._is_label_or_level_reference(level, axis=axis)
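# Concretely, for df.set_index("L1") built from the df fixture, "L1" is an
# index *level* reference while "L2" and "L3" are column *label* references;
# the helpers above assert exactly that split and that the two cases are
# mutually exclusive for a given name and axis.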
# DataFrame
# ---------
def test_is_level_or_label_reference_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_levels = df_levels.T
# Perform checks
assert_level_reference(df_levels, expected_levels, axis=axis)
assert_label_reference(df_levels, expected_labels, axis=axis)
def test_is_level_reference_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
# Therefore L1 should reference the label, not the level
assert_label_reference(df_ambig, ["L1"], axis=axis)
# df has an on-axis level named L2 and it is not ambiguous
    # Therefore L2 is a level reference
assert_level_reference(df_ambig, ["L2"], axis=axis)
    # df has a column named L3 and it is not a level reference
assert_label_reference(df_ambig, ["L3"], axis=axis)
# Series
# ------
def test_is_level_reference_series_simple_axis0(df):
# Make series with L1 as index
s = df.set_index("L1").L2
assert_level_reference(s, ["L1"], axis=0)
assert not s._is_level_reference("L2")
# Make series with L1 and L2 as index
s = df.set_index(["L1", "L2"]).L3
assert_level_reference(s, ["L1", "L2"], axis=0)
assert not s._is_level_reference("L3")
def test_is_level_reference_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index("L1").L2
with pytest.raises(ValueError, match="No axis named 1"):
s._is_level_reference("L1", axis=1)
# Test _check_label_or_level_ambiguity_df
# =======================================
# DataFrame
# ---------
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_ambig = df_ambig.T
if axis in {0, "index"}:
msg = "'L1' is both an index level and a column label"
else:
msg = "'L1' is both a column level and an index label"
# df_ambig has both an on-axis level and off-axis label named L1
# Therefore, L1 is ambiguous.
with pytest.raises(ValueError, match=msg):
df_ambig._check_label_or_level_ambiguity("L1", axis=axis)
    # df_ambig has an on-axis level named L2, and it is not ambiguous.
df_ambig._check_label_or_level_ambiguity("L2", axis=axis)
# df_ambig has an off-axis label named L3, and it is not ambiguous
assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)
# Series
# ------
def test_check_label_or_level_ambiguity_series(df):
# A series has no columns and therefore references are never ambiguous
# Make series with L1 as index
s = df.set_index("L1").L2
s._check_label_or_level_ambiguity("L1", axis=0)
s._check_label_or_level_ambiguity("L2", axis=0)
# Make series with L1 and L2 as index
s = df.set_index(["L1", "L2"]).L3
s._check_label_or_level_ambiguity("L1", axis=0)
s._check_label_or_level_ambiguity("L2", axis=0)
s._check_label_or_level_ambiguity("L3", axis=0)
def test_check_label_or_level_ambiguity_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index("L1").L2
with pytest.raises(ValueError, match="No axis named 1"):
s._check_label_or_level_ambiguity("L1", axis=1)
# Test _get_label_or_level_values
# ===============================
def assert_label_values(frame, labels, axis):
for label in labels:
if axis in {0, "index"}:
expected = frame[label]._values
else:
expected = frame.loc[label]._values
result = frame._get_label_or_level_values(label, axis=axis)
assert array_equivalent(expected, result)
def assert_level_values(frame, levels, axis):
for level in levels:
if axis in {0, "index"}:
expected = frame.index.get_level_values(level=level)._values
else:
expected = frame.columns.get_level_values(level=level)._values
result = frame._get_label_or_level_values(level, axis=axis)
assert array_equivalent(expected, result)
# DataFrame
# ---------
def test_get_label_or_level_values_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_levels = df_levels.T
# Perform checks
assert_label_values(df_levels, expected_labels, axis=axis)
assert_level_values(df_levels, expected_levels, axis=axis)
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_ambig = df_ambig.T
# df has an on-axis level named L2, and it is not ambiguous.
assert_level_values(df_ambig, ["L2"], axis=axis)
# df has an off-axis label named L3, and it is not ambiguous.
assert_label_values(df_ambig, ["L3"], axis=axis)
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_duplabels = df_duplabels.T
# df has unambiguous level 'L1'
assert_level_values(df_duplabels, ["L1"], axis=axis)
# df has unique label 'L3'
assert_label_values(df_duplabels, ["L3"], axis=axis)
# df has duplicate labels 'L2'
if axis in {0, "index"}:
expected_msg = "The column label 'L2' is not unique"
else:
expected_msg = "The index label 'L2' is not unique"
with pytest.raises(ValueError, match=expected_msg):
assert_label_values(df_duplabels, ["L2"], axis=axis)
# Series
# ------
def test_get_label_or_level_values_series_axis0(df):
# Make series with L1 as index
s = df.set_index("L1").L2
assert_level_values(s, ["L1"], axis=0)
# Make series with L1 and L2 as index
s = df.set_index(["L1", "L2"]).L3
assert_level_values(s, ["L1", "L2"], axis=0)
def test_get_label_or_level_values_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index("L1").L2
with pytest.raises(ValueError, match="No axis named 1"):
s._get_label_or_level_values("L1", axis=1)
# Test _drop_labels_or_levels
# ===========================
def assert_labels_dropped(frame, labels, axis):
for label in labels:
df_dropped = frame._drop_labels_or_levels(label, axis=axis)
if axis in {0, "index"}:
assert label in frame.columns
assert label not in df_dropped.columns
else:
assert label in frame.index
assert label not in df_dropped.index
def assert_levels_dropped(frame, levels, axis):
for level in levels:
df_dropped = frame._drop_labels_or_levels(level, axis=axis)
if axis in {0, "index"}:
assert level in frame.index.names
assert level not in df_dropped.index.names
else:
assert level in frame.columns.names
assert level not in df_dropped.columns.names
# DataFrame
# ---------
def test_drop_labels_or_levels_df(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis in {1, "columns"}:
df_levels = df_levels.T
# Perform checks
assert_labels_dropped(df_levels, expected_labels, axis=axis)
assert_levels_dropped(df_levels, expected_levels, axis=axis)
with pytest.raises(ValueError, match="not valid labels or levels"):
df_levels._drop_labels_or_levels("L4", axis=axis)
# Series
# ------
def test_drop_labels_or_levels_series(df):
# Make series with L1 as index
s = df.set_index("L1").L2
assert_levels_dropped(s, ["L1"], axis=0)
with pytest.raises(ValueError, match="not valid labels or levels"):
s._drop_labels_or_levels("L4", axis=0)
# Make series with L1 and L2 as index
s = df.set_index(["L1", "L2"]).L3
assert_levels_dropped(s, ["L1", "L2"], axis=0)
with pytest.raises(ValueError, match="not valid labels or levels"):
s._drop_labels_or_levels("L4", axis=0)
|
bsd-3-clause
|
smartscheduling/scikit-learn-categorical-tree
|
sklearn/gaussian_process/gaussian_process.py
|
18
|
34542
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
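# Hand-worked example of the helper above: for X = [[0.], [1.], [3.]] the
# pairs (0, 1), (0, 2) and (1, 2) give
#   D  = [[1.], [3.], [2.]]
#   ij = [[0, 1], [0, 2], [1, 2]]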
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
    random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
    .. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard.
        DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
        # The determinant of R is equal to the squared product of the diagonal
        # elements of its Cholesky decomposition C; detR below is that
        # determinant raised to the power 1 / n_samples to keep it well scaled
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ self.random_state.rand(self.theta0.size).reshape(
self.theta0.shape) * np.log10(self.thetaU
/ self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
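# Minimal usage sketch extending the class docstring example above: fit on a
# 1-D toy function and ask predict() for the BLUP together with its MSE. The
# input values are illustrative only.
def _gp_usage_sketch():
    X = np.array([[1., 3., 5., 6., 7., 8.]]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
    gp.fit(X, y)
    x_eval = np.atleast_2d(np.linspace(0., 10., 50)).T
    y_pred, mse = gp.predict(x_eval, eval_MSE=True)
    # 1.96 * sqrt(MSE) gives an approximate 95% confidence band around y_pred
    return y_pred, 1.96 * np.sqrt(mse)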
|
bsd-3-clause
|
JVillella/tensorflow
|
tensorflow/contrib/timeseries/examples/predict_test.py
|
80
|
2487
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ggventurini/dualscope123
|
dualscope123/main.py
|
1
|
40618
|
#!/usr/bin/env python
"""
Oscilloscope + spectrum analyser in Python for the NIOS server.
Modified version from the original code by R. Fearick.
Giuseppe Venturini, July 2012-2013
Original copyright notice follows. The same license applies.
------------------------------------------------------------
Copyright (C) 2008, Roger Fearick, University of Cape Town
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
------------------------------------------------------------
Version 0.1
This code provides a two-channel oscilloscope and spectrum analyzer.
Dependencies:
Python 2.6+
numpy -- numerics, fft
PyQt4, PyQwt5 -- gui, graphics
Optional packages:
pyspectrum -- expert mode spectrum calculation
Typically, a modification of the Python path and ld library is necessary,
like this:
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:.
export PYTHONPATH=$PYTHONPATH:.
The code can be adjusted for different sampling rates and chunks lengths.
The interface, based on qwt, uses a familar 'knob based' layout so that it
approximates an analogue scope.
Traces can be averaged to reduce influence of noise.
A cross hair status display permits the reading of values off the screen.
Printing and exporting CSV and PDF files are provided.
FFT options
- by default we use the periodogram algorithm from pyspectrum [1] - not
in Debian stable but available through pypi and easy_install.
[1] https://www.assembla.com/spaces/PySpectrum/wiki
- If 'pyspectrum' is not available, we fallback to using the FFT
method from numpy to compute the PSD.
- Using numpy to calculate the FFT can be forced setting:
USE_NUMPY_FFT = True
in the following code.
- additionally, it is possible to use matplotlib.psd().
-> you need to modify the sources to do so.
INSTALLING pyspectrum
The package pyspectrum can be installed with either:
'pip install spectrum' or 'easy_install spectrum'
"""
import sys
import struct
import subprocess
import time
import os.path
import ConfigParser
import importlib
from PyQt4 import Qt
from PyQt4 import Qwt5 as Qwt
import numpy as np
import numpy.fft as FFT
# part of this package -- csv interface and toolbar icons
from . import csvlib, icons, utils
import dualscope123.probes
from dualscope123.probes import eth_nios
# scope configuration
CHANNELS = 2
DEFAULT_TIMEBASE = 0.01
BOTH12 = 0
CH1 = 1
CH2 = 2
scopeheight = 500 #px
scopewidth = 800 #px
SELECTEDCH = BOTH12
TIMEPENWIDTH = 1
FFTPENWIDTH = 2
# status messages
freezeInfo = 'Freeze: Press mouse button and drag'
cursorInfo = 'Cursor Pos: Press mouse button in plot region'
# FFT CONFIG
USE_NUMPY_FFT = False
try:
import spectrum
print "(II) spectrum MODULE FOUND"
SPECTRUM_MODULE = True
except ImportError:
print "(WW) PSD: spectrum MODULE NOT FOUND"
SPECTRUM_MODULE = False
if USE_NUMPY_FFT:
print "(WW) SPECTRUM MODULE DISABLED in source"
SPECTRUM_MODULE = False
if not SPECTRUM_MODULE:
print "(WW) PSD: using FFTs through NUMPY.fftpack"
# utility classes
class LogKnob(Qwt.QwtKnob):
"""
Provide knob with log scale
"""
def __init__(self, *args):
apply(Qwt.QwtKnob.__init__, (self,) + args)
self.setScaleEngine(Qwt.QwtLog10ScaleEngine())
def setRange(self, minR, maxR, step=.333333):
self.setScale(minR, maxR)
Qwt.QwtKnob.setRange(self, np.log10(minR), np.log10(maxR), step)
def setValue(self, val):
Qwt.QwtKnob.setValue(self, np.log10(val))
class LblKnob:
"""
Provide knob with a label
"""
def __init__(self, wgt, x, y, name, logscale=0):
if logscale:
self.knob = LogKnob(wgt)
else:
self.knob = Qwt.QwtKnob(wgt)
color = Qt.QColor(200, 200, 210)
self.knob.palette().setColor(Qt.QPalette.Active,
Qt.QPalette.Button,
color)
self.lbl = Qt.QLabel(name, wgt)
self.knob.setGeometry(x, y, 140, 100)
# oooh, eliminate this ...
if name[0] == 'o':
self.knob.setKnobWidth(40)
self.lbl.setGeometry(x, y+90, 140, 15)
self.lbl.setAlignment(Qt.Qt.AlignCenter)
def setRange(self, *args):
apply(self.knob.setRange, args)
def setValue(self, *args):
apply(self.knob.setValue, args)
def setScaleMaxMajor(self, *args):
apply(self.knob.setScaleMaxMajor, args)
class Scope(Qwt.QwtPlot):
"""
Oscilloscope display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Scope')
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine))
self.grid.attach(self)
# axes
self.enableAxis(Qwt.QwtPlot.yRight)
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Time [s]')
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Amplitude []')
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0)
self.setAxisScaleEngine(Qwt.QwtPlot.yRight, Qwt.QwtLinearScaleEngine())
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0)
self.setAxisMaxMajor(Qwt.QwtPlot.yRight, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.yRight, 0)
# curves for scope traces: 2 first so 1 is on top
self.curve2 = Qwt.QwtPlotCurve('Trace2')
self.curve2.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
Qt.QBrush(2),
Qt.QPen(Qt.Qt.darkMagenta),
Qt.QSize(3, 3)))
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta, TIMEPENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yRight)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('Trace1')
self.curve1.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
Qt.QBrush(2),
Qt.QPen(Qt.Qt.darkBlue),
Qt.QSize(3, 3)))
self.curve1.setPen(Qt.QPen(Qt.Qt.blue, TIMEPENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
# default settings
self.triggerval = 0.10
self.triggerCH = None
self.triggerslope = 0
self.maxamp = 100.0
self.maxamp2 = 100.0
self.freeze = 0
self.average = 0
self.autocorrelation = 0
self.avcount = 0
self.datastream = None
self.offset1 = 0.0
self.offset2 = 0.0
self.maxtime = 0.1
# set data
# NumPy: f, g, a and p are arrays!
self.dt = 1.0/samplerate
self.f = np.arange(0.0, 10.0, self.dt)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.f, self.a1)
self.curve2.setData(self.f, self.a2)
# start self.timerEvent() callbacks running
self.timer_id = self.startTimer(self.maxtime*100+50)
# plot
self.replot()
# convenience methods for knob callbacks
def setMaxAmp(self, val):
self.maxamp = val
def setMaxAmp2(self, val):
self.maxamp2 = val
def setMaxTime(self, val):
self.maxtime = val
def setOffset1(self, val):
self.offset1 = val
def setOffset2(self, val):
self.offset2 = val
def setTriggerLevel(self, val):
self.triggerval = val
def setTriggerCH(self, val):
self.triggerCH = val
def setTriggerSlope(self, val):
self.triggerslope = val
# plot scope traces
def setDisplay(self):
l = len(self.a1)
if SELECTEDCH == BOTH12:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif SELECTEDCH == CH2:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif SELECTEDCH == CH1:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index], self.a[index]
def setAverage(self, state):
self.average = state
self.avcount = 0
def setAutoc(self, state):
self.autocorrelation = state
self.avcount = 0
def setFreeze(self, freeze):
self.freeze = freeze
def setDatastream(self, datastream):
self.datastream = datastream
def updateTimer(self):
self.killTimer(self.timer_id)
self.timer_id = self.startTimer(self.maxtime*100 + 50)
# timer callback that does the work
def timerEvent(self,e): # Scope
global fftbuffersize
        if self.datastream is None: return
if self.freeze == 1: return
points = int(np.ceil(self.maxtime*samplerate))
if self.triggerCH or self.autocorrelation:
# we read twice as much data to be sure to be able to display data for all time points.
# independently of trigger point location.
read_points = 2*points
else:
read_points = points
fftbuffersize = read_points
if SELECTEDCH == BOTH12:
channel = 12
if verbose:
print "Reading %d frames" % (read_points)
X, Y = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
if len(X) == 0: return
i=0
data_CH1 = X
data_CH2 = Y
elif SELECTEDCH == CH1:
channel = 1
if verbose:
print "Reading %d frames" % (read_points)
X = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
if len(X) == 0: return
i=0
data_CH1 = X
data_CH2 = np.zeros((points,))
        elif SELECTEDCH == CH2:
channel = 2
if verbose:
print "Reading %d frames" % (read_points)
X = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
data_CH2 = X
data_CH1 = np.zeros((points,))
if self.triggerCH == 1 and (SELECTEDCH == BOTH12 or SELECTEDCH == CH1):
print "Waiting for CH1 trigger..."
if self.triggerslope == 0:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) != 0)[0]
if self.triggerslope == 1:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) > 0)[0]
if self.triggerslope == 2:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) < 0)[0]
if not len(zero_crossings): return
print "Triggering on sample", zero_crossings[0]
imin = zero_crossings[0]
imax = zero_crossings[0] + points
data_CH1 = data_CH1[imin:imax]
elif self.triggerCH == 2 and (SELECTEDCH == BOTH12 or SELECTEDCH == CH2):
print "Waiting for CH2 trigger..."
if self.triggerslope == 0:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) != 0)[0]
if self.triggerslope == 1:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) > 0)[0]
if self.triggerslope == 2:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) < 0)[0]
if not len(zero_crossings): return
print "Triggering on sample", zero_crossings[0]
imin = zero_crossings[0]
imax = zero_crossings[0] + points
data_CH2 = data_CH2[imin:imax]
if self.autocorrelation:
if SELECTEDCH == BOTH12 or SELECTEDCH == CH1:
data_CH1 = utils.autocorrelation(data_CH1[:2*points])[:points]
else:
data_CH1 = np.zeros((points,))
if SELECTEDCH == BOTH12 or SELECTEDCH == CH2:
data_CH2 = utils.autocorrelation(data_CH2[:2*points])[:points]
else:
data_CH2 = np.zeros((points,))
if self.average == 0:
self.a1 = data_CH1
self.a2 = data_CH2
else:
self.avcount += 1
if self.avcount == 1:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
else:
if SELECTEDCH==BOTH12:
assert len(data_CH1) == len(data_CH2)
lp = len(data_CH1)
if len(self.sumCH1) == lp and len(self.sumCH2) == lp:
self.sumCH1 = self.sumCH1[:lp] + np.array(data_CH1[:lp], dtype=np.float_)
self.sumCH2 = self.sumCH2[:lp] + np.array(data_CH2[:lp], dtype=np.float_)
else:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
self.avcount = 1
elif SELECTEDCH == CH1:
lp = len(data_CH1)
if len(self.sumCH1) == lp:
self.sumCH1 = self.sumCH1[:lp] + np.array(data_CH1[:lp], dtype=np.float_)
else:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.avcount = 1
elif SELECTEDCH==CH2:
lp = len(data_CH2)
if len(self.sumCH2) == lp:
self.sumCH2 = self.sumCH2[:lp] + np.array(data_CH2[:lp], dtype=np.float_)
else:
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
self.avcount = 1
self.a1 = self.sumCH1/self.avcount
self.a2 = self.sumCH2/self.avcount
self.setDisplay()
inittime=0.01
initamp=100
class ScopeFrame(Qt.QFrame):
"""
Oscilloscope widget --- contains controls + display
"""
def __init__(self, *args):
apply(Qt.QFrame.__init__, (self,) + args)
# the following: setPal.. doesn't seem to work on Win
try:
self.setPaletteBackgroundColor( QColor(240,240,245))
except: pass
hknobpos=scopewidth+20
vknobpos=scopeheight+30
self.setFixedSize(scopewidth+150, scopeheight+150)
self.freezeState = 0
self.triggerComboBox = Qt.QComboBox(self)
self.triggerComboBox.setGeometry(hknobpos+10, 50, 100, 40)#"Channel: ")
self.triggerComboBox.addItem("Trigger off")
self.triggerComboBox.addItem("CH1")
self.triggerComboBox.addItem("CH2")
self.triggerComboBox.setCurrentIndex(0)
self.triggerSlopeComboBox = Qt.QComboBox(self)
self.triggerSlopeComboBox.setGeometry(hknobpos+10, 100, 100, 40)#"Channel: ")
self.triggerSlopeComboBox.addItem("Any Slope")
self.triggerSlopeComboBox.addItem("Positive")
self.triggerSlopeComboBox.addItem("Negative")
self.triggerSlopeComboBox.setCurrentIndex(0)
self.knbLevel = LblKnob(self, hknobpos, 160,"Trigger level (%FS)")
self.knbTime = LblKnob(self, hknobpos, 300,"Time", 1)
self.knbSignal = LblKnob(self, 150, vknobpos, "Signal1",1)
self.knbSignal2 = LblKnob(self, 450, vknobpos, "Signal2",1)
self.knbOffset1=LblKnob(self, 10, vknobpos, "offset1")
self.knbOffset2=LblKnob(self, 310, vknobpos, "offset2")
self.knbTime.setRange(0.0001, 1.0)
self.knbTime.setValue(DEFAULT_TIMEBASE)
self.knbSignal.setRange(1, 1e6, 1)
self.knbSignal.setValue(100.0)
self.knbSignal2.setRange(1, 1e6, 1)
self.knbSignal2.setValue(100.0)
self.knbOffset2.setRange(-1.0, 1.0, 0.1)
self.knbOffset2.setValue(0.0)
self.knbOffset1.setRange(-1.0, 1.0, 0.1)
self.knbOffset1.setValue(0.0)
self.knbLevel.setRange(-1.0, 1.0, 0.1)
self.knbLevel.setValue(0.1)
self.knbLevel.setScaleMaxMajor(10)
self.plot = Scope(self)
self.plot.setGeometry(10, 10, scopewidth, scopeheight)
self.picker = Qwt.QwtPlotPicker(
Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
Qwt.QwtPlotPicker.CrossRubberBand,
Qwt.QwtPicker.ActiveOnly, #AlwaysOn,
self.plot.canvas())
self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTimebase)
self.knbTime.setValue(0.01)
self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude)
self.connect(self.knbSignal2.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude2)
#self.knbSignal.setValue(0.1)
self.connect(self.knbLevel.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTriggerlevel)
self.connect(self.knbOffset1.knob, Qt.SIGNAL("valueChanged(double)"),
self.plot.setOffset1)
self.connect(self.knbOffset2.knob, Qt.SIGNAL("valueChanged(double)"),
self.plot.setOffset2)
self.connect(self.triggerComboBox, Qt.SIGNAL('currentIndexChanged(int)'), self.setTriggerCH)
self.connect(self.triggerSlopeComboBox, Qt.SIGNAL('currentIndexChanged(int)'), self.plot.setTriggerSlope)
self.knbLevel.setValue(0.1)
self.plot.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*inittime)
self.plot.setAxisScale( Qwt.QwtPlot.yLeft, -initamp, initamp)
self.plot.setAxisScale( Qwt.QwtPlot.yRight, -initamp, initamp)
self.plot.show()
def _calcKnobVal(self, val):
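        # Snap the knob's log10 value to a 1-2-5 sequence per decade
        # (multipliers 1, 2 and 5, i.e. ... 0.1, 0.2, 0.5, 1, 2, 5 ...).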
ival = np.floor(val)
frac = val - ival
if frac >= 0.9:
frac = 1.0
elif frac >= 0.66:
frac = np.log10(5.0)
elif frac >= np.log10(2.0):
frac = np.log10(2.0)
else:
frac = 0.0
dt = 10**frac*10**ival
return dt
def setTimebase(self, val):
dt = self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*dt)
self.plot.setMaxTime(dt*10.0)
self.plot.replot()
def setAmplitude(self, val):
dt = self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.yLeft, -dt, dt)
self.plot.setMaxAmp(dt)
self.plot.replot()
def setAmplitude2(self, val):
dt = self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.yRight, -dt, dt)
self.plot.setMaxAmp2(dt)
self.plot.replot()
def setTriggerlevel(self, val):
self.plot.setTriggerLevel(val)
self.plot.setDisplay()
def setTriggerCH(self, val):
if val == 0:
val = None
self.plot.setTriggerCH(val)
self.plot.setDisplay()
#--------------------------------------------------------------------
class FScope(Qwt.QwtPlot):
"""
Power spectrum display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Power spectrum');
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine));
self.grid.attach(self)
# axes
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Frequency [Hz]');
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Power Spectrum [dBc/Hz]');
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0);
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0);
# curves
self.curve2 = Qwt.QwtPlotCurve('PSTrace2')
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta,FFTPENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yLeft)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('PSTrace1')
self.curve1.setPen(Qt.QPen(Qt.Qt.blue,FFTPENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
self.triggerval=0.0
self.maxamp=100.0
self.maxamp2=100.0
self.freeze=0
self.average=0
self.avcount=0
self.logy=1
self.datastream=None
self.dt=1.0/samplerate
self.df=1.0/(fftbuffersize*self.dt)
self.f = np.arange(0.0, samplerate, self.df)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.f, self.a1)
self.curve2.setData(self.f, self.a2)
self.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 12.5*initfreq)
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.startTimer(100)
self.replot()
def resetBuffer(self):
self.df=1.0/(fftbuffersize*self.dt)
self.f = np.arange(0.0, samplerate, self.df)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
        self.curve1.setData(self.f, self.a1)
        self.curve2.setData(self.f, self.a2)
def setMaxAmp(self, val):
if val>0.6:
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.logy=1
else:
self.setAxisScale( Qwt.QwtPlot.yLeft, 0.0, 10.0*val)
self.logy=0
self.maxamp=val
def setMaxTime(self, val):
self.maxtime=val
self.updateTimer()
def setTriggerLevel(self, val):
self.triggerval=val
def setDisplay(self):
n=fftbuffersize/2
if SELECTEDCH==BOTH12:
self.curve1.setData(self.f[0:n], self.a1[:n])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif SELECTEDCH==CH2:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif SELECTEDCH==CH1:
self.curve1.setData(self.f[0:n], self.a1[:n])
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index],self.a1[index]
def setAverage(self, state):
self.average = state
self.avcount=0
def setFreeze(self, freeze):
self.freeze = freeze
def setDatastream(self, datastream):
self.datastream = datastream
def timerEvent(self,e): # FFT
global fftbuffersize
if self.datastream == None: return
if self.freeze == 1: return
if SELECTEDCH == BOTH12:
channel = 12
X, Y = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH1 = X[:fftbuffersize]
data_CH2 = Y[:fftbuffersize]
elif SELECTEDCH == CH1:
channel = 1
X = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH1 = X[:fftbuffersize]
data_CH2 = np.ones((fftbuffersize,))
elif SELECTEDCH == CH2:
channel = 2
X = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH2 = X[:fftbuffersize]
data_CH1 = np.ones((fftbuffersize,))
self.df = 1.0/(fftbuffersize*self.dt)
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Frequency [Hz] - Bin width %g Hz' % (self.df,))
self.f = np.arange(0.0, samplerate, self.df)
if not SPECTRUM_MODULE:
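            # Blackman-windowed periodogram: take |FFT(x*w)|^2 and normalize by the
            # window power (sum of w^2), the one-sided/rms factor and the sample
            # interval, as noted in the scaling comments below.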
lenX = fftbuffersize
window = np.blackman(lenX)
sumw = np.sum(window*window)
A = FFT.fft(data_CH1*window) #lenX
B = (A*np.conjugate(A)).real
A = FFT.fft(data_CH2*window) #lenX
B2 = (A*np.conjugate(A)).real
sumw *= 2.0 # sym about Nyquist (*4); use rms (/2)
sumw /= self.dt # sample rate
B /= sumw
B2 /= sumw
else:
print "FFT buffer size: %d points" % (fftbuffersize,)
            B = spectrum.Periodogram(np.array(data_CH1, dtype=np.float64), samplerate)
B.sides = 'onesided'
B.run()
B = B.get_converted_psd('onesided')
            B2 = spectrum.Periodogram(np.array(data_CH2, dtype=np.float64), samplerate)
B2.sides = 'onesided'
B2.run()
B2 = B2.get_converted_psd('onesided')
if self.logy:
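            # Log display: convert the spectra to dB and reference each trace to its
            # own peak, matching the dBc/Hz axis label.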
P1 = np.log10(B)*10.0
P2 = np.log10(B2)*10.0
P1 -= P1.max()
P2 -= P2.max()
else:
P1 = B
P2 = B2
if not self.average:
self.a1 = P1
self.a2 = P2
self.avcount = 0
else:
self.avcount += 1
if self.avcount == 1:
self.sumP1 = P1
self.sumP2 = P2
elif self.sumP1.shape != P1.shape or self.sumP1.shape != P1.shape:
self.avcount = 1
self.sumP1 = P1
self.sumP2 = P2
else:
self.sumP1 += P1
self.sumP2 += P2
self.a1 = self.sumP1/self.avcount
self.a2 = self.sumP2/self.avcount
self.setDisplay()
initfreq = 100.0
class FScopeFrame(Qt.QFrame):
"""
Power spectrum widget --- contains controls + display
"""
def __init__(self , *args):
apply(Qt.QFrame.__init__, (self,) + args)
vknobpos=scopeheight+30
hknobpos=scopewidth+10
        # the following: setPal.. doesn't seem to work on Win
try:
self.setPaletteBackgroundColor( QColor(240,240,245))
except: pass
self.setFixedSize(scopewidth+160, scopeheight+160)
self.freezeState = 0
self.knbSignal = LblKnob(self,160, vknobpos, "Signal",1)
self.knbTime = LblKnob(self,310, vknobpos,"Frequency", 1)
self.knbTime.setRange(1.0, 1250.0)
self.knbSignal.setRange(100, 1000000)
self.plot = FScope(self)
self.plot.setGeometry(12.5, 10, scopewidth+120, scopeheight)
self.picker = Qwt.QwtPlotPicker(
Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
Qwt.QwtPlotPicker.CrossRubberBand,
Qwt.QwtPicker.ActiveOnly, #AlwaysOn,
self.plot.canvas())
self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTimebase)
self.knbTime.setValue(1000.0)
self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude)
self.knbSignal.setValue(1000000)
self.plot.show()
def _calcKnobVal(self,val):
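        # Same 1-2-5 log snapping as ScopeFrame._calcKnobVal.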
ival = np.floor(val)
frac = val - ival
if frac >= 0.9:
frac = 1.0
elif frac >= 0.66:
frac = np.log10(5.0)
elif frac >= np.log10(2.0):
frac = np.log10(2.0)
else:
frac = 0.0
dt = 10**frac*10**ival
return dt
def setTimebase(self, val):
dt = self._calcKnobVal(val)
self.plot.setAxisScale(Qwt.QwtPlot.xBottom, 0.0, 12.5*dt)
self.plot.replot()
def setAmplitude(self, val):
minp = self._calcKnobVal(val)
self.plot.setAxisScale(Qwt.QwtPlot.yLeft, -int(np.log10(minp)*20), 0.0)
self.plot.replot()
#---------------------------------------------------------------------
class FScopeDemo(Qt.QMainWindow):
"""
Application container widget
Contains scope and power spectrum analyser in tabbed windows.
Enables switching between the two.
Handles toolbar and status.
"""
def __init__(self, *args):
apply(Qt.QMainWindow.__init__, (self,) + args)
self.freezeState = 0
self.changeState = 0
self.averageState = 0
self.autocState = 0
self.scope = ScopeFrame(self)
self.current = self.scope
self.pwspec = FScopeFrame(self)
self.pwspec.hide()
self.stack=Qt.QTabWidget(self)
self.stack.addTab(self.scope,"scope")
self.stack.addTab(self.pwspec,"fft")
self.setCentralWidget(self.stack)
toolBar = Qt.QToolBar(self)
self.addToolBar(toolBar)
sb=self.statusBar()
sbfont=Qt.QFont("Helvetica",12)
sb.setFont(sbfont)
self.btnFreeze = Qt.QToolButton(toolBar)
self.btnFreeze.setText("Freeze")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
self.btnFreeze.setCheckable(True)
self.btnFreeze.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnFreeze)
self.btnSave = Qt.QToolButton(toolBar)
self.btnSave.setText("Save CSV")
self.btnSave.setIcon(Qt.QIcon(Qt.QPixmap(icons.save)))
self.btnSave.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnSave)
self.btnPDF = Qt.QToolButton(toolBar)
self.btnPDF.setText("Export PDF")
self.btnPDF.setIcon(Qt.QIcon(Qt.QPixmap(icons.pdf)))
self.btnPDF.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnPDF)
self.btnPrint = Qt.QToolButton(toolBar)
self.btnPrint.setText("Print")
self.btnPrint.setIcon(Qt.QIcon(Qt.QPixmap(icons.print_xpm)))
self.btnPrint.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnPrint)
self.btnMode = Qt.QToolButton(toolBar)
self.btnMode.setText("fft")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
self.btnMode.setCheckable(True)
self.btnMode.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnMode)
self.btnAvge = Qt.QToolButton(toolBar)
self.btnAvge.setText("average")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.btnAvge.setCheckable(True)
self.btnAvge.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnAvge)
self.btnAutoc = Qt.QToolButton(toolBar)
self.btnAutoc.setText("autocorrelation")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.btnAutoc.setCheckable(True)
self.btnAutoc.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnAutoc)
#self.lstLabl = Qt.QLabel("Buffer:",toolBar)
#toolBar.addWidget(self.lstLabl)
#self.lstChan = Qt.QComboBox(toolBar)
#self.lstChan.insertItem(0,"8192")
#self.lstChan.insertItem(1,"16k")
#self.lstChan.insertItem(2,"32k")
#toolBar.addWidget(self.lstChan)
self.lstLR = Qt.QLabel("Channels:",toolBar)
toolBar.addWidget(self.lstLR)
self.lstLRmode = Qt.QComboBox(toolBar)
self.lstLRmode.insertItem(0,"1&2")
self.lstLRmode.insertItem(1,"CH1")
self.lstLRmode.insertItem(2,"CH2")
toolBar.addWidget(self.lstLRmode)
self.connect(self.btnPrint, Qt.SIGNAL('clicked()'), self.printPlot)
self.connect(self.btnSave, Qt.SIGNAL('clicked()'), self.saveData)
self.connect(self.btnPDF, Qt.SIGNAL('clicked()'), self.printPDF)
self.connect(self.btnFreeze, Qt.SIGNAL('toggled(bool)'), self.freeze)
self.connect(self.btnMode, Qt.SIGNAL('toggled(bool)'), self.mode)
self.connect(self.btnAvge, Qt.SIGNAL('toggled(bool)'), self.average)
self.connect(self.btnAutoc, Qt.SIGNAL('toggled(bool)'),
self.autocorrelation)
#self.connect(self.lstChan, Qt.SIGNAL('activated(int)'), self.fftsize)
self.connect(self.lstLRmode, Qt.SIGNAL('activated(int)'), self.channel)
self.connect(self.scope.picker,
Qt.SIGNAL('moved(const QPoint&)'),
self.moved)
self.connect(self.scope.picker,
Qt.SIGNAL('appended(const QPoint&)'),
self.appended)
self.connect(self.pwspec.picker,
Qt.SIGNAL('moved(const QPoint&)'),
self.moved)
self.connect(self.pwspec.picker,
Qt.SIGNAL('appended(const QPoint&)'),
self.appended)
self.connect(self.stack,
Qt.SIGNAL('currentChanged(int)'),
self.mode)
self.showInfo(cursorInfo)
#self.showFullScreen()
#print self.size()
def showInfo(self, text):
self.statusBar().showMessage(text)
def printPlot(self):
printer = Qt.QPrinter(Qt.QPrinter.HighResolution)
printer.setOutputFileName('scope-plot.ps')
printer.setCreator('Ethernet Scope')
printer.setOrientation(Qt.QPrinter.Landscape)
printer.setColorMode(Qt.QPrinter.Color)
docName = self.current.plot.title().text()
if not docName.isEmpty():
docName.replace(Qt.QRegExp(Qt.QString.fromLatin1('\n')), self.tr(' -- '))
printer.setDocName(docName)
dialog = Qt.QPrintDialog(printer)
if dialog.exec_():
# filter = Qwt.PrintFilter()
# if (Qt.QPrinter.GrayScale == printer.colorMode()):
# filter.setOptions(
# Qwt.QwtPlotPrintFilter.PrintAll
# & ~Qwt.QwtPlotPrintFilter.PrintBackground
# | Qwt.QwtPlotPrintFilter.PrintFrameWithScales)
self.current.plot.print_(printer)
#p = Qt.QPrinter()
#if p.setup():
# self.current.plot.printPlot(p)#, Qwt.QwtFltrDim(200));
def printPDF(self):
fileName = Qt.QFileDialog.getSaveFileName(
self,
'Export File Name',
'',
'PDF Documents (*.pdf)')
if not fileName.isEmpty():
printer = Qt.QPrinter()
printer.setOutputFormat(Qt.QPrinter.PdfFormat)
printer.setOrientation(Qt.QPrinter.Landscape)
printer.setOutputFileName(fileName)
printer.setCreator('Ethernet Scope')
self.current.plot.print_(printer)
# p = QPrinter()
# if p.setup():
# self.current.plot.printPlot(p)#, Qwt.QwtFltrDim(200));
def saveData(self):
fileName = Qt.QFileDialog.getSaveFileName(
self,
'Export File Name',
'',
'CSV Documents (*.csv)')
if not fileName.isEmpty():
csvlib.write_csv(fileName,
np.vstack((
np.arange(self.current.plot.a1.shape[0], dtype=int32)/samplerate,
self.current.plot.a1,
self.current.plot.a2)),
("TIME", "CH1", "CH2"))
def channel(self, item):
global SELECTEDCH
if item == 1:
SELECTEDCH = CH1
elif item == 2:
SELECTEDCH = CH2
else:
SELECTEDCH = BOTH12
self.scope.plot.avcount = 0
self.pwspec.plot.avcount = 0
def freeze(self, on, changeIcon=True):
if on:
self.freezeState = 1
if changeIcon:
self.btnFreeze.setText("Run")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.goicon)))
else:
self.freezeState = 0
if changeIcon:
self.btnFreeze.setText("Freeze")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
self.scope.plot.setFreeze(self.freezeState)
self.pwspec.plot.setFreeze(self.freezeState)
def average(self, on):
if on:
self.averageState = 1
self.btnAvge.setText("single")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
else:
self.averageState = 0
self.btnAvge.setText("average")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.scope.plot.setAverage(self.averageState)
self.pwspec.plot.setAverage(self.averageState)
def autocorrelation(self, on):
if on:
self.autocState = 1
self.btnAutoc.setText("normal")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
else:
self.autocState = 0
self.btnAutoc.setText("autocorrelation")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.scope.plot.setAutoc(self.autocState)
def mode(self, on):
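        # Toggle between the scope and FFT tabs; only the visible plot is handed the
        # datastream (the hidden one gets None) so just one of them reads the probe.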
if on:
self.changeState=1
self.current=self.pwspec
self.btnMode.setText("scope")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.scope)))
self.btnMode.setChecked(True)
else:
self.changeState=0
self.current=self.scope
self.btnMode.setText("fft")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
self.btnMode.setChecked(False)
if self.changeState==1:
self.stack.setCurrentIndex(self.changeState)
self.scope.plot.setDatastream(None)
self.pwspec.plot.setDatastream(stream)
else:
self.stack.setCurrentIndex(self.changeState)
self.pwspec.plot.setDatastream(None)
self.scope.plot.setDatastream(stream)
def moved(self, e):
if self.changeState==1:
name='Freq'
else:
name='Time'
frequency = self.current.plot.invTransform(Qwt.QwtPlot.xBottom, e.x())
amplitude = self.current.plot.invTransform(Qwt.QwtPlot.yLeft, e.y())
if name=='Time':
df=self.scope.plot.dt
i=int(frequency/df)
ampa=self.scope.plot.a1[i]
ampb=self.scope.plot.a2[i]
else:
df=self.pwspec.plot.df
i=int(frequency/df)
ampa=self.pwspec.plot.a1[i]
ampb=self.pwspec.plot.a2[i]
self.showInfo('%s=%g, cursor=%g, A=%g, B=%g' %
(name,frequency, amplitude,ampa,ampb))
def appended(self, e):
print 's'
# Python semantics: self.pos = e.pos() does not work; force a copy
self.xpos = e.x()
self.ypos = e.y()
self.moved(e) # fake a mouse move to show the cursor position
def load_cfg():
default = '.audio' # default probe
conf_path = os.path.expanduser('~/.dualscope123')
conf = ConfigParser.ConfigParser()
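    # The config file is plain INI.  Example ~/.dualscope123 as written on first run
    # with the defaults below (values are illustrative):
    #   [DEFAULT]
    #   verbose = false
    #
    #   [probes]
    #   probe = audio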
print "Loaded config file %s" % (conf_path,)
if not os.path.isfile(conf_path):
conf.add_section('probes')
conf.set("probes", "probe", 'audio')
conf.set("DEFAULT", "verbose", 'false')
with open(conf_path, 'w') as fp:
conf.write(fp)
return load_cfg()
else:
conf.read([conf_path])
        if 'probes' not in conf.sections():
raise ConfigParser.NoSectionError("Malformed config file.")
try:
probe_name = conf.get('probes', 'probe').strip("\"'").strip()
except ConfigParser.NoOptionError:
            probe_name = default[1:]
try:
verbose = conf.get('DEFAULT', 'verbose').strip("\"'").strip()
except ConfigParser.NoOptionError:
verbose = False
try:
probe_module = importlib.import_module("."+probe_name, "dualscope123.probes")
except ImportError:
probe_module = importlib.import_module(default, "dualscope123.probes")
probe_name = default[1:]
if verbose in ('true', 'True', '1', 'on', 'yes', 'YES', 'Yes', 'On'):
print "Loaded probe %s" % probe_name
verbose = True
else:
verbose = False
return probe_module, verbose
def main():
global verbose, samplerate, CHUNK, fftbuffersize, stream
probe, verbose = load_cfg()
stream = probe.Probe()
stream.open()
samplerate = stream.RATE
CHUNK = stream.CHUNK
fftbuffersize = CHUNK
app = Qt.QApplication(sys.argv)
demo = FScopeDemo()
demo.scope.plot.setDatastream(stream)
demo.show()
app.exec_()
stream.close()
if __name__ == '__main__':
main()
|
gpl-3.0
|
TomAugspurger/pandas
|
pandas/tests/frame/test_reshape.py
|
1
|
45804
|
from datetime import datetime
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range
import pandas._testing as tm
class TestDataFrameReshape:
def test_pivot(self):
data = {
"index": ["A", "B", "C", "C", "B", "A"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
}
frame = DataFrame(data)
pivoted = frame.pivot(index="index", columns="columns", values="values")
expected = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
expected.index.name, expected.columns.name = "index", "columns"
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == "index"
assert pivoted.columns.name == "columns"
# don't specify values
pivoted = frame.pivot(index="index", columns="columns")
assert pivoted.index.name == "index"
assert pivoted.columns.names == (None, "columns")
def test_pivot_duplicates(self):
data = DataFrame(
{
"a": ["bar", "bar", "foo", "foo", "foo"],
"b": ["one", "two", "one", "one", "two"],
"c": [1.0, 2.0, 3.0, 3.0, 4.0],
}
)
with pytest.raises(ValueError, match="duplicate entries"):
data.pivot("a", "b", "c")
def test_pivot_empty(self):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot("a", "b", "c")
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(["A", "B"], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
"index": ["A", "B", "C", "C", "B", "A"],
"columns": ["One", "One", "One", "Two", "Two", "Two"],
"values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
}
frame = DataFrame(data).set_index("index")
result = frame.pivot(columns="columns", values="values")
expected = DataFrame(
{
"One": {"A": 1.0, "B": 2.0, "C": 3.0},
"Two": {"A": 1.0, "B": 2.0, "C": 3.0},
}
)
expected.index.name, expected.columns.name = "index", "columns"
tm.assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns="columns")
expected.columns = pd.MultiIndex.from_tuples(
[("values", "One"), ("values", "Two")], names=[None, "columns"]
)
expected.index.name = "index"
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == "index"
assert result.columns.names == (None, "columns")
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns="columns", values="values")
expected.columns.name = "columns"
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=np.float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("w", "b", "j")
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(["a", "b", "c", "a"], dtype="category")
data.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(ValueError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = pd.MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = pd.DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = pd.DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=pd.MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=pd.Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
pd.MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
pd.MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
pd.Index([3, 4], name="C"),
pd.MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = pd.DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = pd.DataFrame(
expected_values, columns=expected_columns, index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(
dict(
state=["IL", "MI", "NC"],
index=["a", "b", "c"],
some_categories=pd.Series(["a", "b", "c"]).astype("category"),
A=np.random.rand(3),
B=1,
C="foo",
D=pd.Timestamp("20010102"),
E=pd.Series([1.0, 50.0, 100.0]).astype("float32"),
F=pd.Series([3.0, 4.0, 5.0]).astype("float64"),
G=False,
H=pd.Series([1, 200, 923442], dtype="int8"),
)
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = pd.DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=pd.Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=pd.MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=pd.MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=pd.MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": pd.date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, codes)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(
exp_data.reshape(3, 6), index=idx_level, columns=cols
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = pd.DataFrame(
[[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"]
)
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
        # The error occurred only if a lot of indices are used.
df = pd.DataFrame(
[[1]],
columns=pd.MultiIndex.from_tuples([[0]], names=["c1"]),
index=pd.MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = pd.DataFrame(
[[1]],
columns=pd.MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=pd.Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
        # GH 24729: Unstack a df with multi level columns
df = pd.DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=pd.MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=pd.MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"],
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
        # GH 28306: Unstack df with multi level cols and rows
df = pd.DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=pd.MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=pd.MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
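        # Helpers: cast() renders NaN as "" so joined keys compare cleanly, and
        # verify() checks each non-null unstacked cell "<a>.<b>" against its
        # row and column labels.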
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
# GH7403
df = pd.DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = pd.DataFrame(
{"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": pd.date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = pd.DataFrame({"A": cat, "B": cat})
result = df.stack()
index = pd.MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = pd.Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = pd.DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = pd.MultiIndex.from_tuples(stacked.index.to_numpy())
expected = pd.DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = pd.MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("B", 1)], names=["a", "b"]
)
df = pd.DataFrame(
{
"A": pd.core.arrays.integer_array([0, 1, None]),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = pd.Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = pd.MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = pd.DataFrame(
[[3, 1, 2, 0]],
columns=pd.MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = pd.Series(["a", "b", "c", "a"], dtype="object")
data.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = pd.DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = pd.DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = pd.DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = pd.DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=pd.Index(["a"], name="a"),
columns=pd.MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = pd.date_range(
freq="D", start="20180101", end="20180103", tz="America/New_York"
)
df = pd.DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = pd.Series(
ts,
index=pd.MultiIndex(
levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]
),
)
tm.assert_series_equal(result, expected)
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
rbalda/neural_ocr
|
env/lib/python2.7/site-packages/matplotlib/tests/test_artist.py
|
6
|
6247
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from matplotlib.externals import six
import io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.path as mpath
import matplotlib.transforms as mtrans
import matplotlib.collections as mcollections
from matplotlib.testing.decorators import image_comparison, cleanup
from nose.tools import (assert_true, assert_false)
@cleanup
def test_patch_transform_of_none():
# tests the behaviour of patches added to an Axes with various transform
# specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
# Draw an ellipse over data coord (2,2) by specifying device coords.
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
    # Not providing a transform of None puts the ellipse in data coordinates.
e = mpatches.Ellipse(xy_data, width=1, height=1, fc='yellow', alpha=0.5)
ax.add_patch(e)
assert e._transform == ax.transData
# Providing a transform of None puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
transform=None, alpha=0.5)
assert e.is_transform_set() is True
ax.add_patch(e)
assert isinstance(e._transform, mtrans.IdentityTransform)
# Providing an IdentityTransform puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=100, height=100,
transform=mtrans.IdentityTransform(), alpha=0.5)
ax.add_patch(e)
assert isinstance(e._transform, mtrans.IdentityTransform)
# Not providing a transform, and then subsequently "get_transform" should
# not mean that "is_transform_set".
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
alpha=0.5)
intermediate_transform = e.get_transform()
assert e.is_transform_set() is False
ax.add_patch(e)
assert e.get_transform() != intermediate_transform
assert e.is_transform_set() is True
assert e._transform == ax.transData
@cleanup
def test_collection_transform_of_none():
# tests the behaviour of collections added to an Axes with various
# transform specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
#draw an ellipse over data coord (2,2) by specifying device coords
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
# not providing a transform of None puts the ellipse in data coordinates
e = mpatches.Ellipse(xy_data, width=1, height=1)
c = mcollections.PatchCollection([e], facecolor='yellow', alpha=0.5)
ax.add_collection(c)
# the collection should be in data coordinates
assert c.get_offset_transform() + c.get_transform() == ax.transData
# providing a transform of None puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=120, height=120)
c = mcollections.PatchCollection([e], facecolor='coral',
alpha=0.5)
c.set_transform(None)
ax.add_collection(c)
assert isinstance(c.get_transform(), mtrans.IdentityTransform)
# providing an IdentityTransform puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=100, height=100)
c = mcollections.PatchCollection([e], transform=mtrans.IdentityTransform(),
alpha=0.5)
ax.add_collection(c)
assert isinstance(c._transOffset, mtrans.IdentityTransform)
@image_comparison(baseline_images=["clip_path_clipping"], remove_text=True)
def test_clipping():
exterior = mpath.Path.unit_rectangle().deepcopy()
exterior.vertices *= 4
exterior.vertices -= 2
interior = mpath.Path.unit_circle().deepcopy()
interior.vertices = interior.vertices[::-1]
clip_path = mpath.Path(vertices=np.concatenate([exterior.vertices,
interior.vertices]),
codes=np.concatenate([exterior.codes,
interior.codes]))
star = mpath.Path.unit_regular_star(6).deepcopy()
star.vertices *= 2.6
ax1 = plt.subplot(121)
col = mcollections.PathCollection([star], lw=5, edgecolor='blue',
facecolor='red', alpha=0.7, hatch='*')
col.set_clip_path(clip_path, ax1.transData)
ax1.add_collection(col)
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
patch = mpatches.PathPatch(star, lw=5, edgecolor='blue', facecolor='red',
alpha=0.7, hatch='*')
patch.set_clip_path(clip_path, ax2.transData)
ax2.add_patch(patch)
ax1.set_xlim([-3, 3])
ax1.set_ylim([-3, 3])
@cleanup
def test_cull_markers():
x = np.random.random(20000)
y = np.random.random(20000)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, 'k.')
ax.set_xlim(2, 3)
pdf = io.BytesIO()
fig.savefig(pdf, format="pdf")
assert len(pdf.getvalue()) < 8000
svg = io.BytesIO()
fig.savefig(svg, format="svg")
assert len(svg.getvalue()) < 20000
@cleanup
def test_remove():
fig, ax = plt.subplots()
im = ax.imshow(np.arange(36).reshape(6, 6))
ln, = ax.plot(range(5))
assert_true(fig.stale)
assert_true(ax.stale)
fig.canvas.draw()
assert_false(fig.stale)
assert_false(ax.stale)
assert_false(ln.stale)
assert_true(im in ax.mouseover_set)
assert_true(ln not in ax.mouseover_set)
assert_true(im.axes is ax)
im.remove()
ln.remove()
for art in [im, ln]:
assert_true(art.axes is None)
assert_true(art.figure is None)
assert_true(im not in ax.mouseover_set)
assert_true(fig.stale)
assert_true(ax.stale)
@cleanup
def test_properties():
ln = mlines.Line2D([], [])
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
ln.properties()
assert len(w) == 0
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
paschalidoud/feature-aggregation
|
feature_aggregation/fv.py
|
1
|
10519
|
"""Aggregate local features using Fisher Vectors with a GMM as the
probabilistic model"""
from joblib import Parallel, delayed
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from .base import BaseAggregator
def _transform_batch(x, means, inv_covariances, inv_sqrt_covariances):
"""Compute the grad with respect to the parameters of the model for the
each vector in the matrix x and return the sum.
see "Improving the Fisher Kernel for Large-Scale Image Classification"
by Perronnin et al. for the equations
Parameters
----------
x: array
The feature matrix to be encoded with fisher encoding
means: array
The GMM means
    inv_covariances: array
        The inverse diagonal covariance matrix
    inv_sqrt_covariances: array
        The element-wise square roots of the inverse diagonal covariances
    Returns
    -------
    vector
        The Fisher vector for the passed-in local features
"""
# number of gaussians
N, D = means.shape
# number of dimensions
M, D = x.shape
# calculate the probabilities that each x was created by each gaussian
# distribution keeping some intermediate computations as well
diff = x.reshape(-1, D, 1) - means.T.reshape(1, D, N)
diff = diff.transpose(0, 2, 1)
q = -0.5 * (diff * inv_covariances.reshape(1, N, D) * diff).sum(axis=-1)
q = np.exp(q - q.max(axis=1, keepdims=True))
q /= q.sum(axis=1, keepdims=True)
# Finally compute the unnormalized FV and return it
diff_over_cov = diff * inv_sqrt_covariances.reshape(1, N, D)
return np.hstack([
(q.reshape(M, N, 1) * diff_over_cov).sum(axis=0),
(q.reshape(M, N, 1) * (diff_over_cov**2 - 1)).sum(axis=0)
]).ravel()
class FisherVectors(BaseAggregator):
"""Aggregate local features using Fisher Vector encoding with a GMM.
Train a GMM on some local features and then extract the normalized
derivative
Parameters
----------
n_gaussians : int
The number of gaussians to be used for the fisher vector
encoding
n_pca_components : float
Control the number of PCA components we will use to
reduce the dimensionality of our data. The valid range
        for this parameter is (0, 1], with 1 denoting that the number of
        PCA components equals the number of feature dimensions
max_iter : int
The maximum number of EM iterations
normalization : int
A bitmask of POWER_NORMALIZATION and L2_NORMALIZATION
dimension_ordering : {'th', 'tf'}
Changes how n-dimensional arrays are reshaped to form
simple local feature matrices. 'th' ordering means the
local feature dimension is the second dimension and
'tf' means it is the last dimension.
inner_batch : int
Compute the fisher vector of 'inner_batch' vectors together.
It controls a trade off between speed and memory.
n_jobs : int
The threads to use for the transform
verbose : int
Controls the verbosity of the GMM
"""
POWER_NORMALIZATION = 1
L2_NORMALIZATION = 2
def __init__(self, n_gaussians, n_pca_components=0.8, max_iter=100,
normalization=3, dimension_ordering="tf", inner_batch=64,
n_jobs=-1, verbose=0):
self.n_gaussians = n_gaussians
self.max_iter = max_iter
self.normalization = normalization
self.inner_batch = inner_batch
self.n_jobs = n_jobs
self.verbose = verbose
self.n_pca_components = n_pca_components
super(self.__class__, self).__init__(dimension_ordering)
# initialize the rest of the attributes of the class for any use
# (mainly because we want to be able to check if fit has been called
# before on this instance)
self.pca_model = None
self.weights = None
self.means = None
self.covariances = None
self.inverted_covariances = None
self.normalization_factor = None
def __getstate__(self):
"""Return the data that should be pickled in order to save the fisher
encoder after it is trained.
This way allows us to control what is actually saved to disk and to
recreate whatever cannot be saved like the probability density
functions. Moreover we can choose if we want to trade between storage
space and initialization time (currently maximum space is used).
"""
        # we could simply be grabbing self.__dict__, removing pdfs, and returning
# it but I believe this is more explicit
return {
"n_gaussians": self.n_gaussians,
"n_pca_components": self.n_pca_components,
"max_iter": self.max_iter,
"normalization": self.normalization,
"dimension_ordering": self.dimension_ordering,
"inner_batch": self.inner_batch,
"n_jobs": self.n_jobs,
"verbose": self.verbose,
"pca_model": self.pca_model,
"weights": self.weights,
"means": self.means,
"covariances": self.covariances,
"inverted_covariances": self.inverted_covariances,
"inverted_sqrt_covariances": self.inverted_sqrt_covariances,
"normalization_factor": self.normalization_factor
}
def __setstate__(self, state):
"""Restore the class's state after unpickling.
Parameters
----------
state: dictionary
The unpickled data that were returned by __getstate__
"""
# A temporary instance for accessing the default values
t = FisherVectors(0)
# Load from state
self.n_gaussians = state["n_gaussians"]
self.n_pca_components = state["n_pca_components"]
self.max_iter = state.get("max_iter", t.max_iter)
self.normalization = state.get("normalization", t.normalization)
self.dimension_ordering = \
state.get("dimension_ordering", t.dimension_ordering)
self.inner_batch = state.get("inner_batch", t.inner_batch)
self.n_jobs = state.get("n_jobs", t.n_jobs)
self.verbose = state.get("verbose", t.verbose)
self.pca_model = state.get("pca_model", t.pca_model)
self.weights = state.get("weights", t.weights)
self.means = state.get("means", t.means)
self.covariances = state.get("covariances", t.covariances)
self.inverted_covariances = \
state.get("inverted_covariances", t.inverted_covariances)
        self.inverted_sqrt_covariances = \
state.get("inverted_sqrt_covariances", t.inverted_sqrt_covariances)
self.normalization_factor = \
state.get("normalization_factor", t.normalization_factor)
def fit(self, X, y=None):
"""Learn a fisher vector encoding.
Fit a gaussian mixture model to the data using n_gaussians with
diagonal covariance matrices.
Parameters
----------
X : array_like or list
The local features to train on. They must be either nd arrays or
a list of nd arrays.
"""
X, _ = self._reshape_local_features(X)
if self.n_pca_components != 1:
# train PCA
self.pca_model = PCA(n_components=int(X.shape[-1]*self.n_pca_components))
self.pca_model.fit(X)
# apply PCA and reduce dimensionality
X = self.pca_model.transform(X)
# consider changing the initialization parameters
gmm = GaussianMixture(
n_components=self.n_gaussians,
max_iter=self.max_iter,
covariance_type='diag',
verbose=self.verbose
)
gmm.fit(X)
# save the results of the gmm
self.weights = gmm.weights_
self.means = gmm.means_
self.covariances = gmm.covariances_
# precompute some values for encoding
D = X[0].size
self.inverted_covariances = (1./self.covariances)
self.inverted_sqrt_covariances = np.sqrt(1./self.covariances)
self.normalization_factor = np.hstack([
np.repeat(1.0/np.sqrt(self.weights), D),
np.repeat(1.0/np.sqrt(2*self.weights), D)
])
return self
def transform(self, X):
"""Compute the fisher vector implementation of the provided data.
Parameters
----------
X : array_like or list
The local features to aggregate. They must be either nd arrays or
a list of nd arrays. In case of a list each item is aggregated
separately.
"""
# Check if the GMM is fitted
if self.weights is None:
raise RuntimeError(
"GMM model not found. Have you called fit(data) first?"
)
# Get the local features and the number of local features per document
X, lengths = self._reshape_local_features(X)
if self.n_pca_components != 1:
# Apply PCA and reduce dimensionality
X = self.pca_model.transform(X)
# Allocate the memory necessary for the encoded data
fv = np.zeros((len(lengths), self.normalization_factor.shape[0]))
# Do a naive double loop for now
s, e = 0, 0
for i, l in enumerate(lengths):
s, e = e, e+l
fv[i] = sum(
Parallel(n_jobs=self.n_jobs, backend="threading")(
delayed(_transform_batch)(
X[j:min(e, j+self.inner_batch)],
self.means,
self.inverted_covariances,
self.inverted_sqrt_covariances
)
for j in range(s, e, self.inner_batch)
)
)
# normalize the vectors
fv *= 1.0/np.array(lengths).reshape(-1, 1)
fv *= self.normalization_factor.reshape(1, -1)
# check if we should be normalizing the power
if self.normalization & self.POWER_NORMALIZATION:
fv = np.sqrt(np.abs(fv))*np.sign(fv)
# check if we should be performing L2 normalization
if self.normalization & self.L2_NORMALIZATION:
fv /= np.sqrt(np.einsum("...j,...j", fv, fv)).reshape(-1, 1)
return fv
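# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how the class above might be driven: fit a small GMM on
# random "local descriptors" and encode two documents. All numbers are
# arbitrary, the list-of-arrays input is assumed to be handled by the base
# class as the docstrings describe, and the function is defined but not called.
def _example_usage():
    """Encode two random documents with a 4-component Fisher vector model."""
    rng = np.random.RandomState(0)
    # two "documents", each an (n_local_features x n_dimensions) matrix
    docs = [rng.randn(200, 16), rng.randn(150, 16)]
    encoder = FisherVectors(n_gaussians=4, n_pca_components=0.5, max_iter=20)
    encoder.fit(docs)                   # learns PCA + diagonal-covariance GMM
    encoded = encoder.transform(docs)   # one Fisher vector per document
    # each row has 2 * n_gaussians * n_reduced_dimensions entries
    return encoded.shape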
|
mit
|
looooo/panel-method
|
examples/plots/alpha_cL_3d.py
|
2
|
1161
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import parabem
from parabem import pan3d
from parabem.mesh import mesh_object
from parabem.vtk_export import CaseToVTK
from parabem.utils import check_path, v_inf_deg_range3
mesh = mesh_object.from_OBJ("../mesh/wing_lift.obj")
alpha_0 = np.deg2rad(5)
v_inf = parabem.Vector3(np.cos(alpha_0), 0, np.sin(alpha_0))
v_inf_range = v_inf_deg_range3(v_inf, -5, 10, 20)
fz = []
cmy = []
xcp = []
cL = []
cD = []
case = pan3d.DirichletDoublet0Source0Case3(mesh.panels, mesh.trailing_edges)
case.farfield = 5
case.create_wake(10, 30)
case.v_inf = v_inf
case.create_wake(length=10000, count=4) # length, count
polars = case.polars(v_inf_range)
p = []
alpha = []
for i in polars.values:
alpha.append(np.rad2deg(i.alpha))
p.append((i.cL, i.cD, i.cP, i.cop.x, i.cop.z))
plt.figure(figsize=(10,4))
plt.ylabel(u"alpha")
plt.xlabel(u"cW, cN, cA")
plt.plot(np.array(p).T[0], alpha, label=u"cA")
plt.plot(np.array(p).T[1], alpha, label=u"cW_i")
plt.plot(-np.array(p).T[2], alpha, label=u"- cN")
plt.legend()
plt.grid()
plt.savefig(check_path("results/3d/cLcD.png"))
|
gpl-3.0
|
josherick/bokeh
|
bokeh/charts/builder/tests/test_timeseries_builder.py
|
33
|
2825
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import TimeSeries
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestTimeSeries(unittest.TestCase):
def test_supported_input(self):
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
groups = ['python', 'pypy', 'jython']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
ts = create_chart(TimeSeries, _xy, index='Date')
builder = ts._builders[0]
self.assertEqual(builder._groups, groups)
assert_array_equal(builder._data['x_python'], dts)
assert_array_equal(builder._data['x_pypy'], dts)
assert_array_equal(builder._data['x_jython'], dts)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(TimeSeries, _xy, index=dts)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x_0'], dts)
assert_array_equal(builder._data['x_1'], dts)
assert_array_equal(builder._data['x_2'], dts)
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
nicproulx/mne-python
|
tutorials/plot_stats_cluster_methods.py
|
5
|
8797
|
# doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012 [1]_,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) [2]_ methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
In the top row plot the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth,
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
.. note:: This example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
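# Rough intuition for the two knobs above (a hedged paraphrase, not mne code):
# - "hat": the t-statistic denominator is regularised by adding a small
#   fraction (sigma) of the maximum variance across voxels, which tames
#   spuriously large t values in near-zero-variance voxels (Ridgway 2012).
# - TFCE: instead of a single cluster-forming threshold, each point p gets a
#   score roughly sum_h e(h)**E * h**H * dh over thresholds h up to its own
#   value, where e(h) is the extent of the supporting cluster at height h and
#   (E, H) are typically (0.5, 2); dict(start=0, step=0.2) discretises that sum.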
###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions:
X = X.reshape((n_subjects, 1, n_src))
###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
# Not specifying a connectivity matrix implies grid-like connectivity,
# which we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun, buffer_size=None)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun, buffer_size=None)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
###############################################################################
# References
# ----------
# .. [1] Ridgway et al. 2012, "The problem of low variance voxels in
# statistical parametric mapping; a new hat avoids a 'haircut'",
# NeuroImage. 2012 Feb 1;59(3):2131-41.
#
# .. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement:
# addressing problems of smoothing, threshold dependence, and
# localisation in cluster inference", NeuroImage 44 (2009) 83-98.
|
bsd-3-clause
|
yunlongliukm/chm1_scripts
|
BreakpointCluster.py
|
2
|
7045
|
#!/usr/bin/env python
import pysam
import argparse
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
import pdb
import bisect
ap = argparse.ArgumentParser(description="Cluster breakpoints.")
ap.add_argument("table", help="Input tabular file")
ap.add_argument("left", help="Left clusters.", nargs=2)
ap.add_argument("right", help="Right clusters.", nargs=2)
ap.add_argument("out", help="Output file, - implies stdout")
ap.add_argument("--delta", help="Allowable gap between endponts.", type=int, default=1000)
ap.add_argument("--minSupport", help="Min overlapping clusters", type=int, default=2)
args = ap.parse_args()
tabFile = open(args.table)
#
# *-----*
# *---* right-overlap
# *---* left-overlap, and close enough
def DefineClusters(table, span, col, minSupport = 2):
i = 0
j = 1
clusters = {}
while ( i < len(table) and j < len(table) ):
j = i + 1
if (j == len(table)):
break
maxSpan = table[j][2] - table[j][1]
maxSpanIndex = i
minLeft = table[i][1]
maxRight = table[i][2]
while (j < len(table) and
table[j][0] == table[j-1][0] and
table[j][col] < table[j-1][col] + span):
if (minLeft > table[j][1]):
minLeft = table[j][1]
            if (maxRight < table[j][2]):  # compare against the interval end (the original compared the start)
maxRight = table[j][2]
curSpan = maxRight - minLeft
if (curSpan > maxSpan):
maxSpan = curSpan
maxSpanIndex = j
j += 1
if (table[i][0] not in clusters):
clusters[table[i][0]] = []
if (j - i >= minSupport):
clusters[table[i][0]].append((minLeft, maxRight, j - i))
i = j
return clusters
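# --- Illustrative sketch (not part of the original script) ---
# A hedged, made-up example of what DefineClusters produces: the three
# overlapping chr1 intervals below collapse into a single cluster with a
# support count of 3 when clustered on their start coordinate (col=1).
def _example_define_clusters():
    table = [("chr1", 100, 180),
             ("chr1", 150, 230),
             ("chr1", 170, 260),
             ("chr2", 5000, 5100)]
    return DefineClusters(table, span=1000, col=1, minSupport=2)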
def ReadClusters(clusterFileName, side, minSupport=2, span=100):
cf = open(clusterFileName, 'r')
clusters = {}
nAdded = 0
table = []
for line in cf:
vals = line.split()
chrom = vals[0]
start = int(vals[1])
end = int(vals[2])
table.append((chrom,start,end))
return DefineClusters(table, span, side, minSupport)
def FindCluster(chrom, start, clusters):
s = (start, start)
if (chrom not in clusters):
return None
sb = bisect.bisect_left(clusters[chrom], s)
while (sb < len(clusters[chrom]) and sb > 0 and clusters[chrom][sb][0] > start):
sb -= 1
if (sb >= len(clusters[chrom])):
return None
if (clusters[chrom][sb][0] <= start and clusters[chrom][sb][1] >= start):
return (chrom, sb)
return None
class Interval:
def __init__(self):
self.intv = []
def Check(self, lp, rp):
for i in range(len(self.intv)):
if (abs(lp - self.intv[i][0]) < args.delta or
abs(rp - self.intv[i][1]) < args.delta):
return True
return False
def Add(self, lp, rp):
self.intv.append((lp,rp))
# self.intv.sort()
def Size(self):
return len(self.intv)
def Coordinates(self):
return (np.min(self.intv) , np.max(self.intv))
class Cluster:
def __init__(self, leftChr, leftStrand, lstart, lend, rightChr, rightStrand, rstart, rend):
self.leftStrand = leftStrand
self.rightStrand = rightStrand
self.leftChr = leftChr
self.rightChr = rightChr
self.leftBp = Interval()
self.rightBp = Interval()
if (lend< rend):
self.leftBp.Add(lstart, lend)
self.rightBp.Add(rstart, rend)
else:
self.leftBp.Add(rstart, rend)
self.rightBp.Add(lstart, lend)
def CoordinatesOverlap(self, left, right, side):
return getattr(self, side).Check(left, right)
def IntervalsOverlap(self, lchr, lstr, lsta, lend, rchr, rstr, rsta, rend):
if ((lchr == self.leftChr and lstr == self.leftStrand and self.CoordinatesOverlap(lsta, lend, 'leftBp')) and
(rchr == self.rightChr and rstr == self.rightStrand and self.CoordinatesOverlap(rsta, rend, 'rightBp'))):
return 1
elif ((lchr == self.rightChr and lstr == self.rightStrand and self.CoordinatesOverlap(lsta, lend, 'rightBp')) and
(rchr == self.leftChr and rstr == self.leftStrand and self.CoordinatesOverlap(rsta, rend, 'leftBp'))):
return 2
else:
return 0
def Add(self, left, right, side):
getattr(self, side).Add(left, right)
nLines = 0
if (args.out == "-"):
outFile = sys.stdout
else:
outFile = open(args.out, 'w')
allClusters = {}
leftClusters = {}
rightClusters = {}
llClusters = ReadClusters(args.left[0], 1, args.minSupport, args.delta)
lrClusters = ReadClusters(args.left[1], 2, args.minSupport, args.delta)
rlClusters = ReadClusters(args.right[0], 1, args.minSupport, args.delta)
rrClusters = ReadClusters(args.right[1], 2, args.minSupport, args.delta)
clusterPairCounts = {}
leftClusters = [llClusters, lrClusters]
rightClusters = [rlClusters, rrClusters]
for line in tabFile:
vals = line.split()
lscore = int(vals[1])
lstrand = int(vals[2])
lchr = vals[3]
lstart = int(vals[4])
lend = int(vals[5])
rscore = int(vals[6])
rstrand = int(vals[7])
rchr = vals[8]
rstart = int(vals[9])
rend = int(vals[10])
clLeftStart = FindCluster(lchr, lstart, llClusters)
clLeftEnd = FindCluster(lchr, lend, lrClusters)
clRightStart = FindCluster(rchr, rstart, rlClusters)
clRightEnd = FindCluster(rchr, rend, rrClusters)
# print "coords"
# print str((lstart, lend, rstart, rend))
# print str((clLeftStart, clLeftEnd, clRightStart, clRightEnd))
if ( (clLeftStart != None or clLeftEnd != None) and
(clRightStart != None or clRightEnd != None) ):
if (clLeftStart != None):
leftCluster = (clLeftStart[0], clLeftStart[1], 0)
else:
leftCluster = (clLeftEnd[0],clLeftEnd[1], 1)
if (clRightStart != None):
rightCluster = (clRightStart[0], clRightStart[1], 0)
else:
rightCluster = (clRightEnd[0], clRightEnd[1], 1)
if (leftCluster not in clusterPairCounts):
clusterPairCounts[leftCluster] = {}
if (rightCluster not in clusterPairCounts[leftCluster]):
clusterPairCounts[leftCluster][rightCluster] = 0
clusterPairCounts[leftCluster][rightCluster] += 1
for cpLeft in clusterPairCounts.keys():
for cpRight in clusterPairCounts[cpLeft].keys():
outFile.write(cpLeft[0] + "\t" + str(leftClusters[cpLeft[2]][cpLeft[0]][cpLeft[1]][0]) + "\t" + str(leftClusters[cpLeft[2]][cpLeft[0]][cpLeft[1]][1]) + "\t" + cpRight[0] + "\t" + str(rightClusters[cpRight[2]][cpRight[0]][cpRight[1]][0]) + "\t" + str(rightClusters[cpRight[2]][cpRight[0]][cpRight[1]][1]) + "\t" + str(clusterPairCounts[cpLeft][cpRight]) + "\n" )
if (outFile != sys.stdout):
outFile.close()
##except:
## print "problem"
|
mit
|
jonwright/ImageD11
|
sandbox/offset_image.py
|
1
|
3239
|
from __future__ import print_function
import numpy as np
from PIL import Image
REAL = float  # np.float was an alias for the builtin float and has been removed from newer numpy
def normalise( im ):
""" Set mean to 0, std to 1 """
avg = np.ravel(im).mean()
std = np.ravel(im).std()
if std > 0:
return ( im - avg ) / std
else:
return im - avg
def cross_correlate(a2, b2, fftshape):
""" Make 2D cross correlation image """
# FFT:
a2 = np.fft.rfft2(a2, s = fftshape)
b2 = np.fft.rfft2(b2, s = fftshape)
# Cross correlate
c = np.fft.irfft2( a2 * b2.conj() )
return c
def find_offset( c ):
""" Co-ordinates of max in a 2D cross correlation """
flatmax = c.argmax()
# print flatmax, c.flat[flatmax]
dim0 = int(flatmax/c.shape[1])
dim1 = flatmax - c.shape[1] * dim0
# print dim0,dim1,c[dim0,dim1]
roi = c[ dim0-6:dim0+7, dim1-6:dim1+7 ]
troi = roi.ravel().sum()
x = np.dot( roi.sum(axis=1), np.arange(dim0-6,dim0+7) ) / troi
y = np.dot( roi.sum(axis=0), np.arange(dim1-6,dim1+7) ) / troi
# Average the local area ?
return x, y
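# --- Illustrative sanity check (not part of the original script) ---
# A hedged sketch: cross-correlate a random image with a circularly shifted
# copy of itself. The peak returned by find_offset encodes the applied shift
# (up to wrap-around in the zero-padded frame), which is how register_image
# below recovers the offset between two photographs. The shift is arbitrary.
def _example_shift_check(shift=(12, 7)):
    rng = np.random.RandomState(0)
    ref = normalise(rng.rand(64, 64))
    cur = normalise(np.roll(np.roll(ref, shift[0], axis=0), shift[1], axis=1))
    fftshape = (ref.shape[0] + cur.shape[0] + 4,
                ref.shape[1] + cur.shape[1] + 4)
    c = cross_correlate(ref, cur, fftshape)
    return find_offset(c)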
def register_image( rname, cname ):
""" We take the current and place it onto reference """
ref = normalise(np.asarray(Image.open(rname)).sum(axis=2, dtype=REAL))
cur = normalise(np.asarray(Image.open(cname)).sum(axis=2, dtype=REAL))
fftshape = ( ref.shape[0] + cur.shape[0] + 4,
ref.shape[1] + cur.shape[1] + 4 )
cor = cross_correlate( ref, cur, fftshape)
x, y = find_offset( cor )
if False:
from matplotlib.pylab import imshow, show, figure, colorbar, plot
figure(1)
imshow(ref)
figure(2)
imshow(cur)
figure(3)
imshow(cor)
colorbar()
print(y,x)
plot( [y], [x], "+", mec='g', lw=2, ms=10 )
show()
print(x, y)
return cur, ref, x, y
def display_registered( current, reference, xf, yf ):
merged = reference.copy()
x, y = int(xf+0.5) , int( yf+0.5)
print("cur",current.shape, "ref",reference.shape,x,y)
print(x,x+current.shape[0] , y,y+current.shape[1])
merged[ x:x+current.shape[0] , y:y+current.shape[1] ] = current
from matplotlib.pylab import imshow, show, title
imshow(merged)
title("x=%f y=%f"%(xf,yf))
show()
def test(filename):
rgb = np.asarray(Image.open(filename))
vals = 1.0*rgb[:,:,0] + rgb[:,:,1] + rgb[:,:,2]
print(vals.shape)
cen = vals.shape[0]/2, vals.shape[1]/2
obj = vals[ 190:263 , 460:523 ]
fftshape = ( vals.shape[0] + obj.shape[0] + 3, vals.shape[1] + obj.shape[1] + 3 )
c = cross_correlate(
normalise(vals),
normalise(obj),
fftshape )
c = c[:vals.shape[0],:vals.shape[1]]
x, y = find_offset(c)
# ideal offset? position of the (0,0) pixel of obj in vals
from matplotlib.pylab import imshow, show, figure, colorbar, plot
figure(1)
imshow(obj)
figure(2)
imshow(vals)
figure(3)
imshow(c)
colorbar()
print(y,x)
plot( [y], [x], "+", mec='g', lw=2, ms=10 )
show()
return c
if __name__=="__main__":
import sys
# test(sys.argv[1])
c,r,x,y = register_image( sys.argv[1], sys.argv[2] )
display_registered( c,r, x, y )
|
gpl-2.0
|
ferchault/iago
|
src/iago/PandasPatch.py
|
1
|
1302
|
""" The purpose of this file is to monkey-patch the pandas.DataFrame class to include metadata about columns like
units.
"""
# system modules
import types
# third-party modules
import pandas as pd
def explain(self, columnnames=None):
if isinstance(columnnames, types.StringTypes):
columnnames = [columnnames]
if columnnames is None:
columnnames = self.columns
comments, units = [], []
for columnname in columnnames:
try:
comment = self._iago_comments[columnname]
except KeyError:
comment = 'No description available.'
try:
unit = self._iago_units[columnname]
if unit is None:
unit = 'Dimensionless.'
except KeyError:
unit = 'No unit available.'
comments.append(comment)
units.append(unit)
return pd.DataFrame({'Name': columnnames, 'Comment': comments, 'Unit': units})
def modified_init(self, *args, **kwargs):
self._iago_old_init(*args, **kwargs)
self._iago_comments = dict()
self._iago_units = dict()
def annotations_to_dict(self):
columns = self._iago_units.keys()
return {column: (self._iago_comments[column], str(self._iago_units[column])) for column in columns}
pd.DataFrame._iago_old_init = pd.DataFrame.__init__
pd.DataFrame.__init__ = modified_init
pd.DataFrame.explain = explain
pd.DataFrame.annotations_to_dict = annotations_to_dict
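# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the patched DataFrame in action; the column names,
# comment and unit below are invented purely for demonstration.
def _example_usage():
    df = pd.DataFrame({'energy': [1.0, 2.0], 'step': [0, 1]})
    df._iago_comments['energy'] = 'Total potential energy.'
    df._iago_units['energy'] = 'kJ/mol'
    # 'step' falls back to the "No description/unit available." defaults
    return df.explain(['energy', 'step'])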
|
mit
|
klocey/DiversityTools
|
distributions/distributions.py
|
10
|
2502
|
from __future__ import division
import sys
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import pandas as pd
########### PATHS ##############################################################
tools = os.path.expanduser("~/tools")
sys.path.append(tools + "/macroeco_distributions")
import macroeco_distributions as md
######### CLASS ################################################################
class zipf:
""" A class to obtain a zipf object with inherited mle shape parameter,
mle form of the rank-abundance distribution, and a rank-abundance curve
based on fitting the zipf to the observed via a generalized linear model."""
def __init__(self, obs):
self.obs = obs
def from_cdf(self):
""" Obtain the maximum likelihood form of the Zipf distribution, given
the mle value for the Zipf shape parameter (a). Using a, this code
generates a rank-abundance distribution (RAD) from the cumulative
density function (cdf) using the percent point function (ppf) also known
as the quantile function.
see: http://www.esapubs.org/archive/ecol/E093/155/appendix-B.htm
This is an actual form of the Zipf distribution, obtained from getting
the mle for the shape parameter.
"""
p = md.zipf_solver(self.obs)
S = len(self.obs)
rv = stats.zipf(a=p)
rad = []
for i in range(1, S+1):
val = (S - i + 0.5)/S
x = rv.ppf(val)
rad.append(int(x))
return rad
def from_glm(self):
""" Fit the Zipf distribution to the observed vector of integer values
using a generalized linear model.
        Note: This is a fitted curve, not an actual form of the Zipf distribution
This method was inspired by the vegan
package's open source code on vegan's public GitHub repository:
https://github.com/vegandevs/vegan/blob/master/R/rad.zipf.R
        on Thursday, 19 March 2015 """
ranks = np.log(range(1, len(self.obs)+1))
off = [np.log(sum(self.obs))] * len(self.obs)
d = pd.DataFrame({'ranks': ranks, 'off': off, 'x':self.obs})
lm = smf.glm(formula='x ~ ranks', data = d, family = sm.families.Poisson()).fit()
pred = lm.predict()
return pred
# zipf_pred = zipf(ad)
# zipf_mle = zipf_pred.from_cdf()
# zipf_glm = zipf_pred.from_glm()
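# --- Illustrative usage sketch (not part of the original script) ---
# A hedged example with a made-up ranked abundance vector. from_cdf() relies
# on the external macroeco_distributions checkout imported at the top of this
# file; from_glm() only needs numpy/pandas/statsmodels.
def _example_usage():
    ad = [120, 80, 40, 22, 15, 9, 6, 4, 2, 1]  # observed abundances, ranked
    zipf_pred = zipf(ad)
    rad_mle = zipf_pred.from_cdf()  # mle form of the Zipf RAD
    rad_glm = zipf_pred.from_glm()  # GLM-fitted curve (not a true Zipf form)
    return rad_mle, rad_glm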
|
mit
|
mayhem/led-chandelier
|
software/hippietrap/generator.py
|
1
|
10017
|
#!/usr/bin/python
import abc
import math
import random
from . import common
def plot(g):
import matplotlib.pyplot as plt
t = -2.0
end = 2.0
step = .01
x = []
y = []
while t <= end:
x.append(t)
y.append(g[t])
t += step
plt.plot(x, y)
plt.ylabel('value')
plt.xlabel('time')
plt.show()
MAX_LOCAL_RANDOM_VALUES = 6
local_random_values = []
def clear_local_random_values():
global local_random_values
local_random_values = []
def set_local_random_value(value):
global local_random_values
# if len(local_random_values) >= MAX_LOCAL_RANDOM_VALUES:
# print "Warning: attempting to store more than %d random values -- it won't be repeatable." % MAX_LOCAL_RANDOM_VALUES
local_random_values.append(value)
def get_local_random_value(index):
global local_random_values
if len(local_random_values) <= index:
raise KeyError("Invalid get local random index: %d" % index)
return local_random_values[index]
class GeneratorBase(object):
pass
class GenOp(GeneratorBase):
def __init__(self, operation, gen1, gen2):
if not isinstance(gen1, GeneratorBase) or not isinstance(gen2, GeneratorBase):
raise TypeError("GenOp needs to be passed two Generator objects")
self.operation = operation
self.g1 = gen1
self.g2 = gen2
def describe(self):
desc = common.make_function(common.FUNC_GENOP, (common.ARG_VALUE, common.ARG_FUNC,common.ARG_FUNC))
desc += common.pack_fixed(self.operation)
desc += self.g1.describe()
desc += self.g2.describe()
return desc
def __getitem__(self, t):
if self.operation == common.OP_ADD:
return self.g1[t] + self.g2[t]
elif self.operation == common.OP_SUB:
return self.g1[t] - self.g2[t]
elif self.operation == common.OP_MUL:
return self.g1[t] * self.g2[t]
        # the original repeated the OP_SUB branch here; OP_DIV is assumed to be
        # the constant intended for the division below
        elif self.operation == common.OP_DIV:
return self.g1[t] / self.g2[t]
elif self.operation == common.OP_MOD:
return self.g1[t] % self.g2[t]
return 0.0
class Abs(GeneratorBase):
def __init__(self, gen):
self.g = gen
def describe(self):
desc = common.make_function(common.FUNC_ABS, (common.ARG_FUNC,))
desc += self.g.describe()
return desc
def __getitem__(self, t):
return abs(self.g[t])
class Constant(GeneratorBase):
def __init__(self, value):
self.value = value
def describe(self):
desc = common.make_function(common.FUNC_CONSTANT, (common.ARG_VALUE,))
desc += common.pack_fixed(self.value)
return desc
def __getitem__(self, t):
return self.value
class LocalAngle(GeneratorBase):
def __init__(self, rev = False):
self.rev = rev
def describe(self):
desc = common.make_function(common.FUNC_LOCAL_ANGLE, (common.ARG_VALUE,))
desc += common.pack_fixed(self.rev)
return desc
def __getitem__(self, t):
return self.rev
class LocalRandomValue(GeneratorBase):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
self.value = lower + random.random() * (upper - lower)
set_local_random_value(self.value)
def describe(self):
desc = common.make_function(common.FUNC_LOCAL_RANDOM, (common.ARG_VALUE,common.ARG_VALUE))
desc += common.pack_fixed(self.lower)
desc += common.pack_fixed(self.upper)
return desc
def __getitem__(self, t):
return self.value
class RepeatLocalRandomValue(GeneratorBase):
def __init__(self, index):
self.index = index
self.value = get_local_random_value(index)
def describe(self):
desc = common.make_function(common.FUNC_REPEAT_LOCAL_RANDOM, (common.ARG_VALUE,))
desc += common.pack_fixed(self.index)
return desc
def __getitem__(self, t):
return self.value
class Generator(GeneratorBase):
def __init__(self, period, phase, amplitude, offset):
self.period = period
self.phase = phase
self.amplitude = amplitude
self.offset = offset
if type(self.period) in (int, float):
self.period_f = None
else:
self.period_f = self.period
self.period = self.period_f[0]
if self.period == 0.0:
raise ValueError("Period 0 is invalid")
if type(self.phase) in (int, float):
self.phase_f = None
else:
self.phase_f = self.phase
self.phase = self.phase_f[0]
if type(self.amplitude) in (int, float):
self.amplitude_f = None
else:
self.amplitude_f = self.amplitude
self.amplitude = self.amplitude_f[0]
if type(self.offset) in (int, float):
self.offset_f = None
else:
self.offset_f = self.offset
self.offset = self.offset_f[0]
@abc.abstractmethod
def describe(self):
pass
def _describe(self):
args = []
desc = bytearray()
if self.period_f:
desc += self.period_f.describe()
args.append(common.ARG_FUNC)
else:
desc += common.pack_fixed(self.period)
args.append(common.ARG_VALUE)
if self.phase_f:
desc += self.phase_f.describe()
args.append(common.ARG_FUNC)
else:
desc += common.pack_fixed(self.phase)
args.append(common.ARG_VALUE)
if self.amplitude_f:
desc += self.amplitude_f.describe()
args.append(common.ARG_FUNC)
else:
desc += common.pack_fixed(self.amplitude)
args.append(common.ARG_VALUE)
if self.offset_f:
desc += self.offset_f.describe()
args.append(common.ARG_FUNC)
else:
desc += common.pack_fixed(self.offset)
args.append(common.ARG_VALUE)
return (desc, args)
@abc.abstractmethod
def __getitem__(self, t):
pass
class Sin(Generator):
def __init__(self, period = 1.0, phase = 0, amplitude = .5, offset = .5):
# convert from using pesky pi to using parametric values
super(Sin, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_SIN, args) + desc
def __getitem__(self, t):
period = math.pi / (self.period/2.0)
phase = (-math.pi / 2.0) + (math.pi * 2 * self.phase)
v = math.sin(t * period + phase) * self.amplitude + self.offset
return v
class Square(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0, duty=.5):
super(Square, self).__init__(period, phase, amplitude, offset)
self.duty = duty
if type(self.duty) in (int, float):
self.duty_f = None
else:
self.duty_f = self.duty
self.duty = self.duty_f[0]
def describe(self):
desc, args = self._describe()
if self.duty_f:
desc += self.duty_f.describe()
args.append(common.ARG_FUNC)
else:
desc += common.pack_fixed(self.duty)
args.append(common.ARG_VALUE)
return common.make_function(common.FUNC_SQUARE, args) + desc
def __getitem__(self, t):
v = (t / self.period) + self.phase
if float(v) % 1 < self.duty:
return self.amplitude + self.offset
else:
return self.offset
class Sawtooth(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0):
super(Sawtooth, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_SAWTOOTH, args) + desc
def __getitem__(self, t):
period = 1.0 / self.period
return (t * period + self.phase) % 1.0 * self.amplitude + self.offset
class Step(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0):
super(Step, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_STEP, args) + desc
def __getitem__(self, t):
v = (t / self.period) + self.phase
if v >= 0.0:
return self.amplitude + self.offset
else:
return self.offset
class Impulse(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0):
super(Impulse, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_IMPULSE, args) + desc
def __getitem__(self, t):
v = (t / self.period) + self.phase
if v >= 0.0 and v < 1.0:
return self.amplitude + self.offset
else:
return self.offset
class Sparkle(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0):
super(Sparkle, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_SPARKLE, args) + desc
def __getitem__(self, t):
v = (t / self.period) + self.phase
if v < 1.0:
return self.amplitude - t + self.offset
        # past the sparkle window: return the base offset (the original
        # assigned to ``v`` here and implicitly returned None)
        return self.offset
class Line(Generator):
def __init__(self, period = 1.0, phase = 0.0, amplitude = 1.0, offset = 0.0):
super(Line, self).__init__(period, phase, amplitude, offset)
def describe(self):
desc, args = self._describe()
return common.make_function(common.FUNC_LINE, args) + desc
def __getitem__(self, t):
return (t * self.amplitude) + self.offset
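# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of composing two of the generators above with GenOp and
# sampling the result over one second; the periods and amplitudes are
# arbitrary, and the function is defined here but never called.
def _example_usage():
    wobble = GenOp(common.OP_ADD,
                   Sin(period=1.0, amplitude=0.4, offset=0.5),
                   Sawtooth(period=0.25, amplitude=0.1, offset=0.0))
    samples = [wobble[i / 100.0] for i in range(100)]
    return min(samples), max(samples)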
|
mit
|
ahaberlie/MetPy
|
src/metpy/plots/__init__.py
|
1
|
1325
|
# Copyright (c) 2014,2015,2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Contains functionality for making meteorological plots."""
import logging
# Trigger matplotlib wrappers
from . import _mpl # noqa: F401
from ._util import add_metpy_logo, add_timestamp, add_unidata_logo, convert_gempak_color
from .ctables import * # noqa: F403
from .declarative import * # noqa: F403
from .skewt import * # noqa: F403
from .station_plot import * # noqa: F403
from .wx_symbols import * # noqa: F403
logger = logging.getLogger(__name__)
__all__ = ctables.__all__[:] # pylint: disable=undefined-variable
__all__.extend(declarative.__all__) # pylint: disable=undefined-variable
__all__.extend(skewt.__all__) # pylint: disable=undefined-variable
__all__.extend(station_plot.__all__) # pylint: disable=undefined-variable
__all__.extend(wx_symbols.__all__) # pylint: disable=undefined-variable
__all__.extend([add_metpy_logo, add_timestamp, add_unidata_logo,
convert_gempak_color]) # pylint: disable=undefined-variable
try:
from .cartopy_utils import USCOUNTIES, USSTATES
__all__.extend([USCOUNTIES, USSTATES])
except ImportError:
logger.warning('Cannot import USCOUNTIES and USSTATES without Cartopy installed.')
|
bsd-3-clause
|
dseredyn/velma_scripts
|
scripts/test_or_urdf.py
|
1
|
12088
|
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_scripts')
import rospy
import tf
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from visualization_msgs.msg import *
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
import copy
import matplotlib.pyplot as plt
import thread
import random
import openravepy
from openravepy import *
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
import itertools
import operator
import rospkg
import velmautils
import openraveinstance
class TestOrURDF:
"""
"""
def KDLToOpenrave(self, T):
ret = numpy.array([
[T.M[0,0], T.M[0,1], T.M[0,2], T.p.x()],
[T.M[1,0], T.M[1,1], T.M[1,2], T.p.y()],
[T.M[2,0], T.M[2,1], T.M[2,2], T.p.z()],
[0, 0, 0, 1]])
return ret
def __init__(self):
self.pub_marker = velmautils.MarkerPublisher()
def spin(self):
#
# Initialise Openrave
#
# parser = OptionParser(description='Openrave Velma interface')
# OpenRAVEGlobalArguments.addOptions(parser)
# (options, leftargs) = parser.parse_args()
# options._collision = "fcl"
# env = OpenRAVEGlobalArguments.parseAndCreate(options,defaultviewer=True)
# xacro_uri = "package://velma_description/robots/velma.urdf.xacro"
# srdf_uri = "package://velma_description/robots/velma.srdf"
rospack = rospkg.RosPack()
openrave = openraveinstance.OpenraveInstance()
openrave.startOpenraveURDF()#env_file=rospack.get_path('velma_scripts')+"/data/romoco/romoco.env.xml")#, collision='fcl')
openrave.readRobot(xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro', srdf_uri=rospack.get_path('velma_description') + '/robots/velma.srdf')
# openrave.startOpenrave(rospack.get_path('velma_scripts')+"/data/romoco/romoco_robot.env.xml")
# print "geometry group:", openrave.env.GetCollisionChecker().GetGeometryGroup()
# openrave.env.GetCollisionChecker().SetGeometryGroup("spheres")
# print "geometry group:", openrave.env.GetCollisionChecker().GetGeometryGroup()
# urdf_module = RaveCreateModule(env, 'urdf')
# name = urdf_module.SendCommand('load ' + urdf_uri + ' ' + srdf_uri)
# robot_rave = env.GetRobot(name)
# for man in robot_rave.GetManipulators():
# print "manipulator", man
# print "gripper", man.GetEndEffector()
# for joint in openrave.robot_rave.GetJoints():
# print joint
# TEST: wrist joints collision
if False:
tab2=[
[-0.397855401039,-2.90307354927],
[2.12894010544,-2.90307354927],
[2.12043237686,-1.87363839149],
[1.92475450039,-1.43123674393],
[0.77621114254,-1.39720571041],
[0.350824713707,-1.00585031509],
[0.401871085167,-0.571956157684],
[0.810242056847,0.414940297604],
[1.34622907639,0.942419290543],
[2.11192464828,1.01898884773],
[2.12894010544,2.8906891346],
[-0.814733862877,2.8906891346],
[-1.22310483456,2.27813267708],
[-2.21850919724,2.29514837265],
[-2.22701668739,-1.32063627243],
[-1.81013822556,-1.66945314407],
[-0.814733862877,-1.73751521111],
[-0.423378348351,-2.09483933449],
]
m_id = 0
for pt in tab2:
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(pt[0],pt[1],0.1), m_id, r=0, g=0, b=1, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
lim5_lo, lim5_up = openrave.robot_rave.GetJoint("right_arm_5_joint").GetLimits()
lim6_lo, lim6_up = openrave.robot_rave.GetJoint("right_arm_6_joint").GetLimits()
for q5 in np.linspace(lim5_lo[0], lim5_up[0], 20):
for q6 in np.linspace(lim6_lo[0], lim6_up[0], 20):
conf = {
"right_arm_5_joint":q5,
"right_arm_6_joint":q6,
}
openrave.updateRobotConfigurationRos(conf)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
if openrave.robot_rave.CheckSelfCollision(report):
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(q5,q6,0), m_id, r=1, g=0, b=0, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
else:
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(q5,q6,0), m_id, r=0, g=1, b=0, namespace='default', frame_id='torso_base', m_type=Marker.CUBE, scale=Vector3(0.1, 0.1, 0.1), T=None)
rospy.sleep(0.01)
raw_input(".")
exit(0)
if False:
for link in openrave.robot_rave.GetLinks():
geoms = link.GetGeometries()
print "geoms:", len(geoms)
col_geoms = link.GetGroupNumGeometries("collision")
print "col_geoms:", col_geoms
vis_geoms = link.GetGroupNumGeometries("visual")
print "vis_geoms:", vis_geoms
print link, link.GetCollisionData()
for g in geoms:
info = g.GetInfo()
print " geom", g.GetType()
print " mesh_collision", info._meshcollision
if len(info._meshcollision.vertices) > 0:
x = info._meshcollision.vertices[0][0]
y = info._meshcollision.vertices[0][1]
z = info._meshcollision.vertices[0][2]
print " mesh_collision", math.sqrt(x*x+y*y+z*z)
print " modifable", info._bModifiable
print " render", info._filenamerender
print " coll", info._filenamecollision
# TEST: collision
if False:
sphere = RaveCreateKinBody(openrave.env,'')
sphere.SetName("sphere")
sphere.InitFromSpheres(numpy.array([[0,0,0,0.1]]),True)
openrave.env.Add(sphere,True)
x = -0.3
while True:
tr = self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x,0.8,1.9)))
sphere.SetTransform(tr)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
ret = openrave.env.CheckCollision(sphere, report)
if report.plink1 == None:
print None
else:
print report.plink1.GetParent().GetName(), report.plink2.GetName()
# print " ", report.vLinkColliding
for link1, link2 in report.vLinkColliding:
print " ", link1.GetParent().GetName(), link2.GetName()
# print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
raw_input(".")
x += 0.005
exit(0)
# CollisionOptions:
# CO_Distance = 1, ///< Compute distance measurements, this is usually slow and not all checkers support it.
# CO_UseTolerance = 2, ///< not used
# CO_Contacts = 4, ///< Return the contact points of the collision in the \ref CollisionReport. Note that this takes longer to compute.
# CO_RayAnyHit = 8, ///< When performing collision with rays, if this is set, algorithm just returns any hit instead of the closest (can be faster)
# Allows planners to greatly reduce redundant collision checks.
# If set and the target object is a robot, then only the links controlled by the currently set active DOFs and their attached bodies will be checked for collisions.
# The things that **will not be** checked for collision are:
# - links that do not remove with respect to each other as a result of moving the active dofs.
# CO_ActiveDOFs = 0x10,
# CO_AllLinkCollisions = 0x20, ///< if set then all the link collisions will be returned inside CollisionReport::vLinkColliding. Collision is slower because more pairs have to be checked.
# CO_AllGeometryContacts = 0x40, ///< if set, then will return the contacts of all the colliding geometries. This option can be very slow.
if True:
openrave.env.GetCollisionChecker().SetCollisionOptions(4)
box = RaveCreateKinBody(openrave.env,'')
box.SetName("box")
box.InitFromBoxes(numpy.array([[0,0,0,0.1,0.1,0.1]]),True)
openrave.env.Add(box,True)
sphere = RaveCreateKinBody(openrave.env,'')
sphere.SetName("sphere")
sphere.InitFromSpheres(numpy.array([[0,0,0,0.1]]),True)
openrave.env.Add(sphere,True)
transforms = [
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0,0.198))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0,-0.198))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,0.198,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0,-0.198,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(0.198,0,0))),
self.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(-0.198,0,0)))
]
for tr in transforms:
print "transform", tr
sphere.SetTransform(tr)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
ret = openrave.env.CheckCollision(box, report)
print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
print report.contacts[0]
ret = openrave.env.CheckCollision(sphere, report)
# print ret
# print report
print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
print report.contacts[0]
raw_input(".")
exit(0)
if __name__ == '__main__':
rospy.init_node('test_or_urdf')
task = TestOrURDF()
rospy.sleep(1)
task.spin()
|
bsd-3-clause
|
moonbury/pythonanywhere
|
MasteringPandas/2060_11_Code/display_iris_dimensions.py
|
3
|
1180
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from IPython.core.pylabtools import figsize
iris_data=load_iris() # Load the iris dataset
figsize(12.5, 10)
fig = plt.figure()
fig.suptitle('Plots of Iris Dimensions', fontsize=14)
fig.subplots_adjust(wspace=0.35,hspace=0.5)
colors=('r','g','b')
cols=[colors[i] for i in iris_data.target]
def get_legend_data(clrs):
leg_data = []
for clr in clrs:
line=plt.Line2D(range(1),range(1),marker='o', color=clr)
leg_data.append(line)
return tuple(leg_data)
def display_iris_dimensions(fig,x_idx, y_idx,sp_idx):
ax = fig.add_subplot(3,2,sp_idx)
ax.scatter(iris_data.data[:, x_idx], iris_data.data[:,y_idx],c=cols)
ax.set_xlabel(iris_data.feature_names[x_idx])
ax.set_ylabel(iris_data.feature_names[y_idx])
leg_data = get_legend_data(colors)
ax.legend(leg_data,iris_data.target_names, numpoints=1,
bbox_to_anchor=(1.265,1.0),prop={'size':8.5})
idx = 1
pairs = [(x,y) for x in range(0,4) for y in range(0,4) if x < y]
for (x,y) in pairs:
display_iris_dimensions(fig,x,y,idx);
idx += 1
|
gpl-3.0
|
rkmaddox/mne-python
|
mne/viz/topomap.py
|
5
|
106033
|
"""Functions to plot M/EEG data e.g. topographies."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Robert Luke <[email protected]>
#
# License: Simplified BSD
import copy
import itertools
from functools import partial
from numbers import Integral
import warnings
import numpy as np
from ..baseline import rescale
from ..channels.channels import _get_ch_type
from ..channels.layout import (
_find_topomap_coords, find_layout, _pair_grad_sensors, _merge_ch_data)
from ..defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT
from ..io.pick import (pick_types, _picks_by_type, pick_info, pick_channels,
_pick_data_channels, _picks_to_idx, _get_channel_types,
_MEG_CH_TYPES_SPLIT)
from ..utils import (_clean_names, _time_mask, verbose, logger, fill_doc,
_validate_type, _check_sphere, _check_option, _is_numeric)
from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
_check_delayed_ssp, _draw_proj_checkbox, figure_nobar,
plt_show, _process_times, DraggableColorbar,
_validate_if_list_of_axes, _setup_cmap, _check_time_unit)
from ..time_frequency import psd_multitaper
from ..defaults import _handle_default
from ..transforms import apply_trans, invert_transform
from ..io.meas_info import Info, _simplify_info
from ..io.proj import Projection
_fnirs_types = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od')
def _adjust_meg_sphere(sphere, info, ch_type):
sphere = _check_sphere(sphere, info)
assert ch_type is not None
if ch_type in ('mag', 'grad', 'planar1', 'planar2'):
# move sphere X/Y (head coords) to device X/Y space
if info['dev_head_t'] is not None:
head_dev_t = invert_transform(info['dev_head_t'])
sphere[:3] = apply_trans(head_dev_t, sphere[:3])
# Set the sphere Z=0 because all this really affects is flattening.
# We could make the head size change as a function of depth in
# the helmet like:
#
# sphere[2] /= -5
#
# but let's just assume some orthographic rather than parallel
# projection for explicitness / simplicity.
sphere[2] = 0.
clip_origin = (0., 0.)
else:
clip_origin = sphere[:2].copy()
return sphere, clip_origin
def _prepare_topomap_plot(inst, ch_type, sphere=None):
"""Prepare topo plot."""
info = copy.deepcopy(inst if isinstance(inst, Info) else inst.info)
sphere, clip_origin = _adjust_meg_sphere(sphere, info, ch_type)
clean_ch_names = _clean_names(info['ch_names'])
for ii, this_ch in enumerate(info['chs']):
this_ch['ch_name'] = clean_ch_names[ii]
info['bads'] = _clean_names(info['bads'])
for comp in info['comps']:
comp['data']['col_names'] = _clean_names(comp['data']['col_names'])
info._update_redundant()
info._check_consistency()
# special case for merging grad channels
layout = find_layout(info)
if (ch_type == 'grad' and layout is not None and
(layout.kind.startswith('Vectorview') or
layout.kind.startswith('Neuromag_122'))):
picks, _ = _pair_grad_sensors(info, layout)
pos = _find_topomap_coords(info, picks[::2], sphere=sphere)
merge_channels = True
elif ch_type in _fnirs_types:
        # fNIRS data commonly has overlapping channels; handle them separately
picks, pos, merge_channels, overlapping_channels = \
_average_fnirs_overlaps(info, ch_type, sphere)
else:
merge_channels = False
if ch_type == 'eeg':
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
elif ch_type == 'csd':
picks = pick_types(info, meg=False, csd=True, ref_meg=False,
exclude='bads')
elif ch_type == 'dbs':
picks = pick_types(info, meg=False, dbs=True, ref_meg=False,
exclude='bads')
elif ch_type == 'seeg':
picks = pick_types(info, meg=False, seeg=True, ref_meg=False,
exclude='bads')
else:
picks = pick_types(info, meg=ch_type, ref_meg=False,
exclude='bads')
if len(picks) == 0:
raise ValueError("No channels of type %r" % ch_type)
pos = _find_topomap_coords(info, picks, sphere=sphere)
ch_names = [info['ch_names'][k] for k in picks]
if ch_type in _fnirs_types:
# Remove the chroma label type for cleaner labeling.
ch_names = [k[:-4] for k in ch_names]
if merge_channels:
if ch_type == 'grad':
# change names so that vectorview combined grads appear as MEG014x
# instead of MEG0142 or MEG0143 which are the 2 planar grads.
ch_names = [ch_names[k][:-1] + 'x' for k in
range(0, len(ch_names), 2)]
else:
assert ch_type in _fnirs_types
# Modify the nirs channel names to indicate they are to be merged
# New names will have the form S1_D1xS2_D2
# More than two channels can overlap and be merged
for set in overlapping_channels:
idx = ch_names.index(set[0][:-4])
new_name = 'x'.join(s[:-4] for s in set)
ch_names[idx] = new_name
pos = np.array(pos)[:, :2] # 2D plot, otherwise interpolation bugs
return picks, pos, merge_channels, ch_names, ch_type, sphere, clip_origin
def _average_fnirs_overlaps(info, ch_type, sphere):
from scipy.spatial.distance import pdist, squareform
picks = pick_types(info, meg=False, ref_meg=False,
fnirs=ch_type, exclude='bads')
chs = [info['chs'][i] for i in picks]
locs3d = np.array([ch['loc'][:3] for ch in chs])
dist = pdist(locs3d)
# Store the sets of channels to be merged
overlapping_channels = list()
# Channels to be excluded from picks, as will be removed after merging
channels_to_exclude = list()
if len(locs3d) > 1 and np.min(dist) < 1e-10:
overlapping_mask = np.triu(squareform(dist < 1e-10))
for chan_idx in range(overlapping_mask.shape[0]):
already_overlapped = list(itertools.chain.from_iterable(
overlapping_channels))
if overlapping_mask[chan_idx].any() and \
(chs[chan_idx]['ch_name'] not in already_overlapped):
# Determine the set of channels to be combined. Ensure the
# first listed channel is the one to be replaced with merge
overlapping_set = [chs[i]['ch_name'] for i in
np.where(overlapping_mask[chan_idx])[0]]
overlapping_set = np.insert(overlapping_set, 0,
(chs[chan_idx]['ch_name']))
overlapping_channels.append(overlapping_set)
channels_to_exclude.append(overlapping_set[1:])
exclude = list(itertools.chain.from_iterable(channels_to_exclude))
        exclude.extend(info['bads'])
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type,
exclude=exclude)
pos = _find_topomap_coords(info, picks, sphere=sphere)
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type)
# Overload the merge_channels variable as this is returned to calling
# function and indicates that merging of data is required
merge_channels = overlapping_channels
else:
picks = pick_types(info, meg=False, ref_meg=False, fnirs=ch_type,
exclude='bads')
merge_channels = False
pos = _find_topomap_coords(info, picks, sphere=sphere)
return picks, pos, merge_channels, overlapping_channels
def _plot_update_evoked_topomap(params, bools):
"""Update topomaps."""
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = params['evoked'].copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = new_evoked.data[:, params['time_idx']] * params['scale']
if params['merge_channels']:
data, _ = _merge_ch_data(data, 'grad', [])
interp = params['interp']
new_contours = list()
for cont, ax, im, d in zip(params['contours_'], params['axes'],
params['images'], data.T):
Zi = interp.set_values(d)()
im.set_data(Zi)
# must be removed and re-added
if len(cont.collections) > 0:
tp = cont.collections[0]
visible = tp.get_visible()
patch_ = tp.get_clip_path()
color = tp.get_color()
lw = tp.get_linewidth()
for tp in cont.collections:
tp.remove()
cont = ax.contour(interp.Xi, interp.Yi, Zi, params['contours'],
colors=color, linewidths=lw)
for tp in cont.collections:
tp.set_visible(visible)
tp.set_clip_path(patch_)
new_contours.append(cont)
params['contours_'] = new_contours
params['fig'].canvas.draw()
def _add_colorbar(ax, im, cmap, side="right", pad=.05, title=None,
format=None, size="5%"):
"""Add a colorbar to an axis."""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes(side, size=size, pad=pad)
cbar = plt.colorbar(im, cax=cax, format=format)
if cmap is not None and cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
if title is not None:
cax.set_title(title, y=1.05, fontsize=10)
return cbar, cax
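# Hedged in-module reminder (mirroring the calls further below in this file):
# the helper returns both the colorbar and its axes so callers can retune the
# ticks afterwards, e.g.::
#
#     cbar, cax = _add_colorbar(ax, im, cmap, title="AU", format='%3.2f')
#     cbar.ax.tick_params(labelsize=12)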
def _eliminate_zeros(proj):
"""Remove grad or mag data if only contains 0s (gh 5641)."""
GRAD_ENDING = ('2', '3')
MAG_ENDING = '1'
proj = copy.deepcopy(proj)
proj['data']['data'] = np.atleast_2d(proj['data']['data'])
for ending in (GRAD_ENDING, MAG_ENDING):
names = proj['data']['col_names']
idx = [i for i, name in enumerate(names) if name.endswith(ending)]
        # if all 0, remove the 0s and their labels
if not proj['data']['data'][0][idx].any():
new_col_names = np.delete(np.array(names), idx).tolist()
new_data = np.delete(np.array(proj['data']['data'][0]), idx)
proj['data']['col_names'] = new_col_names
proj['data']['data'] = np.array([new_data])
proj['data']['ncol'] = len(proj['data']['col_names'])
return proj
@fill_doc
def plot_projs_topomap(projs, info, cmap=None, sensors=True,
colorbar=False, res=64, size=1, show=True,
outlines='head', contours=6, image_interp='bilinear',
axes=None, vlim=(None, None),
sphere=None, extrapolate=_EXTRAPOLATE_DEFAULT,
border=_BORDER_DEFAULT):
"""Plot topographic maps of SSP projections.
Parameters
----------
projs : list of Projection
The projections.
info : instance of Info
The info associated with the channels in the projectors.
.. versionchanged:: 0.20
The positional argument ``layout`` was deprecated and replaced
by ``info``.
%(proj_topomap_kwargs)s
%(topomap_sphere_auto)s
%(topomap_extrapolate)s
.. versionadded:: 0.20
%(topomap_border)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure with a topomap subplot for each projector.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
sphere = _check_sphere(sphere, info)
# be forgiving if `projs` isn't a list
if isinstance(projs, Projection):
projs = [projs]
_validate_type(info, 'info', 'info')
types, datas, poss, spheres, outliness, ch_typess = [], [], [], [], [], []
for proj in projs:
# get ch_names, ch_types, data
proj = _eliminate_zeros(proj) # gh 5641
ch_names = _clean_names(proj['data']['col_names'],
remove_whitespace=True)
if vlim == 'joint':
ch_idxs = np.where(np.in1d(info['ch_names'],
proj['data']['col_names']))[0]
these_ch_types = _get_channel_types(info, ch_idxs, unique=True)
# each projector should have only one channel type
assert len(these_ch_types) == 1
types.append(list(these_ch_types)[0])
data = proj['data']['data'].ravel()
info_names = _clean_names(info['ch_names'], remove_whitespace=True)
picks = pick_channels(info_names, ch_names)
if len(picks) == 0:
raise ValueError(
f'No channel names in info match projector {proj}')
use_info = pick_info(info, picks)
data_picks, pos, merge_channels, names, ch_type, this_sphere, \
clip_origin = _prepare_topomap_plot(
use_info, _get_ch_type(use_info, None), sphere=sphere)
these_outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
data = data[data_picks]
if merge_channels:
data, _ = _merge_ch_data(data, 'grad', [])
data = data.ravel()
# populate containers
datas.append(data)
poss.append(pos)
spheres.append(this_sphere)
outliness.append(these_outlines)
ch_typess.append(ch_type)
del data, pos, this_sphere, these_outlines, ch_type
del sphere
# setup axes
n_projs = len(projs)
if axes is None:
fig, axes, ncols, nrows = _prepare_trellis(
n_projs, ncols='auto', nrows='auto', sharex=True, sharey=True)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, n_projs)
# handle vmin/vmax
vlims = [None for _ in range(len(datas))]
if vlim == 'joint':
for _ch_type in set(types):
idx = np.where(np.in1d(types, _ch_type))[0]
these_data = np.concatenate(np.array(datas, dtype=object)[idx])
norm = all(these_data >= 0)
_vl = _setup_vmin_vmax(these_data, vmin=None, vmax=None, norm=norm)
for _idx in idx:
vlims[_idx] = _vl
# make sure we got a vlim for all projs
assert all([vl is not None for vl in vlims])
else:
vlims = [vlim for _ in range(len(datas))]
# plot
for proj, ax, _data, _pos, _vlim, _sphere, _outlines, _ch_type in zip(
projs, axes, datas, poss, vlims, spheres, outliness, ch_typess):
# title
title = proj['desc']
title = '\n'.join(title[ii:ii + 22] for ii in range(0, len(title), 22))
ax.set_title(title, fontsize=10)
# plot
vmin, vmax = _vlim
im = plot_topomap(_data, _pos[:, :2], vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, res=res, axes=ax,
outlines=_outlines, contours=contours,
image_interp=image_interp, show=False,
extrapolate=extrapolate, sphere=_sphere,
border=border, ch_type=_ch_type)[0]
if colorbar:
_add_colorbar(ax, im, cmap)
fig = ax.get_figure()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
plt_show(show)
return fig
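# A hedged usage sketch for ``plot_projs_topomap`` (not executed here; the
# file name is hypothetical). Projectors already stored in ``raw.info`` can
# be passed together with the measurement info, optionally with a joint
# color scale per channel type::
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif')
#     fig = mne.viz.plot_projs_topomap(raw.info['projs'], raw.info,
#                                      colorbar=True, vlim='joint')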
def _make_head_outlines(sphere, pos, outlines, clip_origin):
"""Check or create outlines for topoplot."""
assert isinstance(sphere, np.ndarray)
x, y, _, radius = sphere
del sphere
if outlines in ('head', 'skirt', None):
ll = np.linspace(0, 2 * np.pi, 101)
head_x = np.cos(ll) * radius + x
head_y = np.sin(ll) * radius + y
dx = np.exp(np.arccos(np.deg2rad(12)) * 1j)
dx, dy = dx.real, dx.imag
nose_x = np.array([-dx, 0, dx]) * radius + x
nose_y = np.array([dy, 1.15, dy]) * radius + y
ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
.532, .510, .489]) * (radius * 2)
ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
-.1313, -.1384, -.1199]) * (radius * 2) + y
if outlines is not None:
# Define the outline of the head, ears and nose
outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
ear_left=(ear_x + x, ear_y),
ear_right=(-ear_x + x, ear_y))
else:
outlines_dict = dict()
# Make the figure encompass slightly more than all points
mask_scale = 1.25 if outlines == 'skirt' else 1.
# We probably want to ensure it always contains our most
# extremely positioned channels, so we do:
mask_scale = max(
mask_scale, np.linalg.norm(pos, axis=1).max() * 1.01 / radius)
outlines_dict['mask_pos'] = (mask_scale * head_x, mask_scale * head_y)
clip_radius = radius * mask_scale
outlines_dict['clip_radius'] = (clip_radius,) * 2
outlines_dict['clip_origin'] = clip_origin
outlines = outlines_dict
elif isinstance(outlines, dict):
if 'mask_pos' not in outlines:
raise ValueError('You must specify the coordinates of the image '
'mask.')
else:
raise ValueError('Invalid value for `outlines`.')
return outlines
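# A minimal, hedged sketch of a user-supplied ``outlines`` dict (coordinates
# are illustrative only): per the check above it must contain 'mask_pos', and
# the interpolation set-up further below also reads 'clip_radius' and
# 'clip_origin'::
#
#     ll = np.linspace(0, 2 * np.pi, 101)
#     circle = (np.cos(ll) * 0.1, np.sin(ll) * 0.1)
#     custom_outlines = dict(head=circle, mask_pos=circle,
#                            clip_radius=(0.1, 0.1), clip_origin=(0., 0.))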
def _draw_outlines(ax, outlines):
"""Draw the outlines for a topomap."""
from matplotlib import rcParams
outlines_ = {k: v for k, v in outlines.items()
if k not in ['patch']}
for key, (x_coord, y_coord) in outlines_.items():
if 'mask' in key or key in ('clip_radius', 'clip_origin'):
continue
ax.plot(x_coord, y_coord, color=rcParams['axes.edgecolor'],
linewidth=1, clip_on=False)
return outlines_
def _get_extra_points(pos, extrapolate, origin, radii):
"""Get coordinates of additinal interpolation points."""
from scipy.spatial.qhull import Delaunay
radii = np.array(radii, float)
assert radii.shape == (2,)
x, y = origin
# auto should be gone by now
_check_option('extrapolate', extrapolate, ('head', 'box', 'local'))
# the old method of placement - large box
mask_pos = None
if extrapolate == 'box':
extremes = np.array([pos.min(axis=0), pos.max(axis=0)])
diffs = extremes[1] - extremes[0]
extremes[0] -= diffs
extremes[1] += diffs
eidx = np.array(list(itertools.product(
*([[0] * (pos.shape[1] - 1) + [1]] * pos.shape[1]))))
pidx = np.tile(np.arange(pos.shape[1])[np.newaxis], (len(eidx), 1))
outer_pts = extremes[eidx, pidx]
return outer_pts, mask_pos, Delaunay(np.concatenate((pos, outer_pts)))
# check if positions are colinear:
diffs = np.diff(pos, axis=0)
with np.errstate(divide='ignore'):
slopes = diffs[:, 1] / diffs[:, 0]
colinear = ((slopes == slopes[0]).all() or np.isinf(slopes).all())
# compute median inter-electrode distance
if colinear or pos.shape[0] < 4:
dim = 1 if diffs[:, 1].sum() > diffs[:, 0].sum() else 0
sorting = np.argsort(pos[:, dim])
pos_sorted = pos[sorting, :]
diffs = np.diff(pos_sorted, axis=0)
distances = np.linalg.norm(diffs, axis=1)
distance = np.median(distances)
else:
tri = Delaunay(pos, incremental=True)
idx1, idx2, idx3 = tri.simplices.T
distances = np.concatenate(
[np.linalg.norm(pos[i1, :] - pos[i2, :], axis=1)
for i1, i2 in zip([idx1, idx2], [idx2, idx3])])
distance = np.median(distances)
if extrapolate == 'local':
if colinear or pos.shape[0] < 4:
            # special case for colinear points and when there are too
            # few points for Delaunay (needs at least 3)
edge_points = sorting[[0, -1]]
line_len = np.diff(pos[edge_points, :], axis=0)
unit_vec = line_len / np.linalg.norm(line_len) * distance
unit_vec_par = unit_vec[:, ::-1] * [[-1, 1]]
edge_pos = (pos[edge_points, :] +
np.concatenate([-unit_vec, unit_vec], axis=0))
new_pos = np.concatenate([pos + unit_vec_par,
pos - unit_vec_par, edge_pos], axis=0)
if pos.shape[0] == 3:
# there may be some new_pos points that are too close
# to the original points
new_pos_diff = pos[..., np.newaxis] - new_pos.T[np.newaxis, :]
new_pos_diff = np.linalg.norm(new_pos_diff, axis=1)
good_extra = (new_pos_diff > 0.5 * distance).all(axis=0)
new_pos = new_pos[good_extra]
tri = Delaunay(np.concatenate([pos, new_pos], axis=0))
return new_pos, new_pos, tri
# get the convex hull of data points from triangulation
hull_pos = pos[tri.convex_hull]
# extend the convex hull limits outwards a bit
channels_center = pos.mean(axis=0)
radial_dir = hull_pos - channels_center
unit_radial_dir = radial_dir / np.linalg.norm(radial_dir, axis=-1,
keepdims=True)
hull_extended = hull_pos + unit_radial_dir * distance
mask_pos = hull_pos + unit_radial_dir * distance * 0.5
hull_diff = np.diff(hull_pos, axis=1)[:, 0]
hull_distances = np.linalg.norm(hull_diff, axis=-1)
del channels_center
# Construct a mask
mask_pos = np.unique(mask_pos.reshape(-1, 2), axis=0)
mask_center = np.mean(mask_pos, axis=0)
mask_pos -= mask_center
mask_pos = mask_pos[
np.argsort(np.arctan2(mask_pos[:, 1], mask_pos[:, 0]))]
mask_pos += mask_center
# add points along hull edges so that the distance between points
# is around that of average distance between channels
add_points = list()
eps = np.finfo('float').eps
n_times_dist = np.round(0.25 * hull_distances / distance).astype('int')
for n in range(2, n_times_dist.max() + 1):
mask = n_times_dist == n
mult = np.arange(1 / n, 1 - eps, 1 / n)[:, np.newaxis, np.newaxis]
steps = hull_diff[mask][np.newaxis, ...] * mult
add_points.append((hull_extended[mask, 0][np.newaxis, ...] +
steps).reshape((-1, 2)))
# remove duplicates from hull_extended
hull_extended = np.unique(hull_extended.reshape((-1, 2)), axis=0)
new_pos = np.concatenate([hull_extended] + add_points)
else:
assert extrapolate == 'head'
# return points on the head circle
angle = np.arcsin(distance / np.mean(radii))
n_pnts = max(12, int(np.round(2 * np.pi / angle)))
points_l = np.linspace(0, 2 * np.pi, n_pnts, endpoint=False)
use_radii = radii * 1.1 + distance
points_x = np.cos(points_l) * use_radii[0] + x
points_y = np.sin(points_l) * use_radii[1] + y
new_pos = np.stack([points_x, points_y], axis=1)
if colinear or pos.shape[0] == 3:
tri = Delaunay(np.concatenate([pos, new_pos], axis=0))
return new_pos, mask_pos, tri
tri.add_points(new_pos)
return new_pos, mask_pos, tri
class _GridData(object):
"""Unstructured (x,y) data interpolator.
This class allows optimized interpolation by computing parameters
for a fixed set of true points, and allowing the values at those points
to be set independently.
"""
def __init__(self, pos, extrapolate, origin, radii, border):
# in principle this works in N dimensions, not just 2
assert pos.ndim == 2 and pos.shape[1] == 2, pos.shape
_validate_type(border, ('numeric', str), 'border')
# check that border, if string, is correct
if isinstance(border, str):
_check_option('border', border, ('mean',), extra='when a string')
# Adding points outside the extremes helps the interpolators
outer_pts, mask_pts, tri = _get_extra_points(
pos, extrapolate, origin, radii)
self.n_extra = outer_pts.shape[0]
self.mask_pts = mask_pts
self.border = border
self.tri = tri
def set_values(self, v):
"""Set the values at interpolation points."""
# Rbf with thin-plate is what we used to use, but it's slower and
# looks about the same:
#
# zi = Rbf(x, y, v, function='multiquadric', smooth=0)(xi, yi)
#
# Eventually we could also do set_values with this class if we want,
# see scipy/interpolate/rbf.py, especially the self.nodes one-liner.
from scipy.interpolate import CloughTocher2DInterpolator
if isinstance(self.border, str):
# we've already checked that border = 'mean'
n_points = v.shape[0]
v_extra = np.zeros(self.n_extra)
indices, indptr = self.tri.vertex_neighbor_vertices
rng = range(n_points, n_points + self.n_extra)
used = np.zeros(len(rng), bool)
for idx, extra_idx in enumerate(rng):
ngb = indptr[indices[extra_idx]:indices[extra_idx + 1]]
ngb = ngb[ngb < n_points]
if len(ngb) > 0:
used[idx] = True
v_extra[idx] = v[ngb].mean()
if not used.all() and used.any():
# Eventually we might want to use the value of the nearest
# point or something, but this case should hopefully be
# rare so for now just use the average value of all extras
v_extra[~used] = np.mean(v_extra[used])
else:
v_extra = np.full(self.n_extra, self.border, dtype=float)
v = np.concatenate((v, v_extra))
self.interpolator = CloughTocher2DInterpolator(self.tri, v)
return self
def set_locations(self, Xi, Yi):
"""Set locations for easier (delayed) calling."""
self.Xi = Xi
self.Yi = Yi
return self
def __call__(self, *args):
"""Evaluate the interpolator."""
if len(args) == 0:
args = [self.Xi, self.Yi]
return self.interpolator(*args)
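# Call pattern for ``_GridData`` as used by ``_plot_topomap`` further below
# (reproduced here only as a reminder)::
#
#     interp = _GridData(pos, extrapolate, origin, radii, border)
#     interp.set_values(data)               # fit the CloughTocher interpolator
#     Zi = interp.set_locations(Xi, Yi)()   # evaluate on the image grid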
def _topomap_plot_sensors(pos_x, pos_y, sensors, ax):
"""Plot sensors."""
if sensors is True:
ax.scatter(pos_x, pos_y, s=0.25, marker='o',
edgecolor=['k'] * len(pos_x), facecolor='none')
else:
ax.plot(pos_x, pos_y, sensors)
def _get_pos_outlines(info, picks, sphere, to_sphere=True):
ch_type = _get_ch_type(pick_info(_simplify_info(info), picks), None)
orig_sphere = sphere
sphere, clip_origin = _adjust_meg_sphere(sphere, info, ch_type)
logger.debug('Generating pos outlines with sphere '
f'{sphere} from {orig_sphere} for {ch_type}')
pos = _find_topomap_coords(
info, picks, ignore_overlap=True, to_sphere=to_sphere,
sphere=sphere)
outlines = _make_head_outlines(sphere, pos, 'head', clip_origin)
return pos, outlines
@fill_doc
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
res=64, axes=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head',
contours=6, image_interp='bilinear', show=True,
onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT,
ch_type='eeg'):
"""Plot a topographic map as image.
Parameters
----------
data : array, shape (n_chan,)
The data values to plot.
pos : array, shape (n_chan, 2) | instance of Info
Location information for the data points(/channels).
If an array, for each data point, the x and y coordinates.
If an Info object, it must contain only one data type and
exactly ``len(data)`` data channels, and the x/y coordinates will
be inferred from this Info object.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True (default), circles
will be used.
res : int
The resolution of the topomap image (n pixels along each side).
axes : instance of Axes | None
The axes to plot to. If None, the current axes will be used.
names : list | None
List of channel names. If None, channel names are not plotted.
%(topomap_show_names)s
If ``True``, a list of names must be provided (see ``names`` keyword).
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to ``True`` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
If an array, the values represent the levels for the contours. The
values are in µV for EEG, fT for magnetometers and fT/m for
gradiometers. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere)s
%(topomap_border)s
%(topomap_ch_type)s
Returns
-------
im : matplotlib.image.AxesImage
The interpolated data.
cn : matplotlib.contour.ContourSet
The fieldlines.
"""
sphere = _check_sphere(sphere)
return _plot_topomap(data, pos, vmin, vmax, cmap, sensors, res, axes,
names, show_names, mask, mask_params, outlines,
contours, image_interp, show,
onselect, extrapolate, sphere=sphere, border=border,
ch_type=ch_type)[:2]
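# A hedged usage sketch for the public ``plot_topomap`` (the random layout
# and data below are purely illustrative)::
#
#     import numpy as np
#     from mne.viz import plot_topomap
#     rng = np.random.RandomState(0)
#     pos = rng.uniform(-0.08, 0.08, size=(32, 2))  # fake (x, y) positions
#     data = rng.randn(32)
#     im, cn = plot_topomap(data, pos, contours=6, show=True)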
def _setup_interp(pos, res, extrapolate, sphere, outlines, border):
logger.debug(f'Interpolation mode {extrapolate} to {border}')
xlim = np.inf, -np.inf,
ylim = np.inf, -np.inf,
mask_ = np.c_[outlines['mask_pos']]
clip_radius = outlines['clip_radius']
clip_origin = outlines.get('clip_origin', (0., 0.))
xmin, xmax = (np.min(np.r_[xlim[0],
mask_[:, 0],
clip_origin[0] - clip_radius[0]]),
np.max(np.r_[xlim[1],
mask_[:, 0],
clip_origin[0] + clip_radius[0]]))
ymin, ymax = (np.min(np.r_[ylim[0],
mask_[:, 1],
clip_origin[1] - clip_radius[1]]),
np.max(np.r_[ylim[1],
mask_[:, 1],
clip_origin[1] + clip_radius[1]]))
xi = np.linspace(xmin, xmax, res)
yi = np.linspace(ymin, ymax, res)
Xi, Yi = np.meshgrid(xi, yi)
interp = _GridData(pos, extrapolate, clip_origin, clip_radius, border)
extent = (xmin, xmax, ymin, ymax)
return extent, Xi, Yi, interp
def _get_patch(outlines, extrapolate, interp, ax):
from matplotlib import patches
clip_radius = outlines['clip_radius']
clip_origin = outlines.get('clip_origin', (0., 0.))
_use_default_outlines = any(k.startswith('head') for k in outlines)
patch_ = None
if 'patch' in outlines:
patch_ = outlines['patch']
patch_ = patch_() if callable(patch_) else patch_
patch_.set_clip_on(False)
ax.add_patch(patch_)
ax.set_transform(ax.transAxes)
ax.set_clip_path(patch_)
if _use_default_outlines:
if extrapolate == 'local':
patch_ = patches.Polygon(
interp.mask_pts, clip_on=True, transform=ax.transData)
else:
patch_ = patches.Ellipse(
clip_origin, 2 * clip_radius[0], 2 * clip_radius[1],
clip_on=True, transform=ax.transData)
return patch_
def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
res=64, axes=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head',
contours=6, image_interp='bilinear', show=True,
onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT, ch_type='eeg'):
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
data = np.asarray(data)
logger.debug(f'Plotting topomap for {ch_type} data shape {data.shape}')
if isinstance(pos, Info): # infer pos from Info object
picks = _pick_data_channels(pos, exclude=()) # pick only data channels
pos = pick_info(pos, picks)
# check if there is only 1 channel type, and n_chans matches the data
ch_type = _get_channel_types(pos, unique=True)
info_help = ("Pick Info with e.g. mne.pick_info and "
"mne.io.pick.channel_indices_by_type.")
if len(ch_type) > 1:
raise ValueError("Multiple channel types in Info structure. " +
info_help)
elif len(pos["chs"]) != data.shape[0]:
raise ValueError("Number of channels in the Info object (%s) and "
"the data array (%s) do not match. "
% (len(pos['chs']), data.shape[0]) + info_help)
else:
ch_type = ch_type.pop()
if any(type_ in ch_type for type_ in ('planar', 'grad')):
# deal with grad pairs
picks = _pair_grad_sensors(pos, topomap_coords=False)
pos = _find_topomap_coords(pos, picks=picks[::2], sphere=sphere)
data, _ = _merge_ch_data(data[picks], ch_type, [])
data = data.reshape(-1)
else:
picks = list(range(data.shape[0]))
pos = _find_topomap_coords(pos, picks=picks, sphere=sphere)
extrapolate = _check_extrapolate(extrapolate, ch_type)
if data.ndim > 1:
raise ValueError("Data needs to be array of shape (n_sensors,); got "
"shape %s." % str(data.shape))
# Give a helpful error message for common mistakes regarding the position
# matrix.
pos_help = ("Electrode positions should be specified as a 2D array with "
"shape (n_channels, 2). Each row in this matrix contains the "
"(x, y) position of an electrode.")
if pos.ndim != 2:
error = ("{ndim}D array supplied as electrode positions, where a 2D "
"array was expected").format(ndim=pos.ndim)
raise ValueError(error + " " + pos_help)
elif pos.shape[1] == 3:
error = ("The supplied electrode positions matrix contains 3 columns. "
"Are you trying to specify XYZ coordinates? Perhaps the "
"mne.channels.create_eeg_layout function is useful for you.")
raise ValueError(error + " " + pos_help)
# No error is raised in case of pos.shape[1] == 4. In this case, it is
# assumed the position matrix contains both (x, y) and (width, height)
# values, such as Layout.pos.
elif pos.shape[1] == 1 or pos.shape[1] > 4:
raise ValueError(pos_help)
pos = pos[:, :2]
if len(data) != len(pos):
raise ValueError("Data and pos need to be of same length. Got data of "
"length %s, pos of length %s" % (len(data), len(pos)))
norm = min(data) >= 0
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
outlines = _make_head_outlines(sphere, pos, outlines, (0., 0.))
assert isinstance(outlines, dict)
ax = axes if axes else plt.gca()
_prepare_topomap(pos, ax)
mask_params = _handle_default('mask_params', mask_params)
# find mask limits
extent, Xi, Yi, interp = _setup_interp(
pos, res, extrapolate, sphere, outlines, border)
interp.set_values(data)
Zi = interp.set_locations(Xi, Yi)()
# plot outline
patch_ = _get_patch(outlines, extrapolate, interp, ax)
# plot interpolated map
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent,
interpolation=image_interp)
# gh-1432 had a workaround for no contours here, but we'll remove it
# because mpl has probably fixed it
linewidth = mask_params['markeredgewidth']
cont = True
if isinstance(contours, (np.ndarray, list)):
pass
elif contours == 0 or ((Zi == Zi[0, 0]) | np.isnan(Zi)).all():
cont = None # can't make contours for constant-valued functions
if cont:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
linewidths=linewidth / 2.)
if patch_ is not None:
im.set_clip_path(patch_)
if cont is not None:
for col in cont.collections:
col.set_clip_path(patch_)
pos_x, pos_y = pos.T
if sensors is not False and mask is None:
_topomap_plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
elif sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
idx = np.where(~mask)[0]
_topomap_plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
elif not sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
if isinstance(outlines, dict):
_draw_outlines(ax, outlines)
if show_names:
if names is None:
raise ValueError("To show names, a list of names must be provided"
" (see `names` keyword).")
if show_names is True:
def _show_names(x):
return x
else:
_show_names = show_names
show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
if ii not in show_idx:
continue
ch_id = _show_names(ch_id)
ax.text(p[0], p[1], ch_id, horizontalalignment='center',
verticalalignment='center', size='x-small')
plt.subplots_adjust(top=.95)
if onselect is not None:
lim = ax.dataLim
x0, y0, width, height = lim.x0, lim.y0, lim.width, lim.height
ax.RS = RectangleSelector(ax, onselect=onselect)
ax.set(xlim=[x0, x0 + width], ylim=[y0, y0 + height])
plt_show(show)
return im, cont, interp
def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r', colorbar=False,
title=None, show=True, outlines='head', contours=6,
image_interp='bilinear', axes=None,
sensors=True, allow_ref_meg=False,
extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT):
"""Plot single ica map to axes."""
from matplotlib.axes import Axes
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
sphere = _check_sphere(sphere, ica.info)
if not isinstance(axes, Axes):
raise ValueError('axis has to be an instance of matplotlib Axes, '
'got %s instead.' % type(axes))
ch_type = _get_ch_type(ica, ch_type, allow_ref_meg=ica.allow_ref_meg)
if ch_type == "ref_meg":
logger.info("Cannot produce topographies for MEG reference channels.")
return
data = ica.get_components()[:, idx]
data_picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
data = data[data_picks]
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
if merge_channels:
data, names = _merge_ch_data(data, ch_type, names)
axes.set_title(ica._ica_names[idx], fontsize=12)
vmin_, vmax_ = _setup_vmin_vmax(data, vmin, vmax)
im = plot_topomap(
data.ravel(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=axes,
cmap=cmap, outlines=outlines, contours=contours, sensors=sensors,
image_interp=image_interp, show=show, extrapolate=extrapolate,
sphere=sphere, border=border, ch_type=ch_type)[0]
if colorbar:
cbar, cax = _add_colorbar(axes, im, cmap, pad=.05, title="AU",
format='%3.2f')
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
_hide_frame(axes)
@verbose
def plot_ica_components(ica, picks=None, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=False, title=None,
show=True, outlines='head', contours=6,
image_interp='bilinear',
inst=None, plot_std=True, topomap_args=None,
image_args=None, psd_args=None, reject='auto',
sphere=None, *, verbose=None):
"""Project mixing matrix on interpolated sensor topography.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
%(picks_all)s
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True). Defaults to 'RdBu_r'.
        .. warning:: Interactive mode works smoothly only for a small number
            of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True (default),
circles will be used.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
inst : Raw | Epochs | None
To be able to see component properties after clicking on component
topomap you need to pass relevant data - instances of Raw or Epochs
(for example the data that ICA was trained on). This takes effect
only when running matplotlib in interactive mode.
plot_std : bool | float
Whether to plot standard deviation in ERP/ERF and spectrum plots.
Defaults to True, which plots one standard deviation above/below.
        If set to a float, controls how many standard deviations are plotted.
        For example, 2.5 will plot 2.5 standard deviations above/below.
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
reject : 'auto' | dict | None
        Allows specifying rejection parameters used to drop epochs
(or segments if continuous signal is passed as inst).
If None, no rejection is applied. The default is 'auto',
which applies the rejection parameters used when fitting
the ICA object.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure or list
The figure object(s).
Notes
-----
    When run in interactive mode, ``plot_ica_components`` allows rejecting
components by clicking on their title label. The state of each component
is indicated by its label color (gray: rejected; black: retained). It is
also possible to open component properties by clicking on the component
topomap (this option is only available when the ``inst`` argument is
supplied).
"""
from ..io import BaseRaw
from ..epochs import BaseEpochs
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
topomap_args = dict() if topomap_args is None else topomap_args
topomap_args = copy.copy(topomap_args)
if 'sphere' not in topomap_args:
topomap_args['sphere'] = sphere
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(
ica, picks=picks, ch_type=ch_type, res=res, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar, title=title,
show=show, outlines=outlines, contours=contours,
image_interp=image_interp, inst=inst, plot_std=plot_std,
topomap_args=topomap_args, image_args=image_args,
psd_args=psd_args, reject=reject, sphere=sphere)
figs.append(fig)
return figs
else:
picks = _picks_to_idx(ica.info, picks)
ch_type = _get_ch_type(ica, ch_type)
cmap = _setup_cmap(cmap, n_axes=len(picks))
data = np.dot(ica.mixing_matrix_[:, picks].T,
ica.pca_components_[:ica.n_components_])
data_picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes, _, _ = _prepare_trellis(len(data), ncols=5)
if title is None:
title = 'ICA components'
fig.suptitle(title)
titles = list()
for ii, data_, ax in zip(picks, data, axes):
kwargs = dict(color='gray') if ii in ica.exclude else dict()
titles.append(ax.set_title(ica._ica_names[ii], fontsize=12, **kwargs))
if merge_channels:
data_, names_ = _merge_ch_data(data_, ch_type, names.copy())
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(
data_.flatten(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=ax,
cmap=cmap[0], outlines=outlines, contours=contours,
image_interp=image_interp, show=False, sensors=sensors,
ch_type=ch_type, **topomap_args)[0]
im.axes.set_label(ica._ica_names[ii])
if colorbar:
cbar, cax = _add_colorbar(ax, im, cmap, title="AU",
side="right", pad=.05, format='%3.2f')
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
_hide_frame(ax)
del pos
tight_layout(fig=fig)
fig.subplots_adjust(top=0.88, bottom=0.)
fig.canvas.draw()
# add title selection interactivity
def onclick_title(event, ica=ica, titles=titles):
# check if any title was pressed
title_pressed = None
for title in titles:
if title.contains(event)[0]:
title_pressed = title
break
# title was pressed -> identify the IC
if title_pressed is not None:
label = title_pressed.get_text()
ic = int(label[-3:])
# add or remove IC from exclude depending on current state
if ic in ica.exclude:
ica.exclude.remove(ic)
title_pressed.set_color('k')
else:
ica.exclude.append(ic)
title_pressed.set_color('gray')
fig.canvas.draw()
fig.canvas.mpl_connect('button_press_event', onclick_title)
# add plot_properties interactivity only if inst was passed
if isinstance(inst, (BaseRaw, BaseEpochs)):
def onclick_topo(event, ica=ica, inst=inst):
# check which component to plot
if event.inaxes is not None:
label = event.inaxes.get_label()
if label.startswith('ICA'):
ic = int(label[-3:])
ica.plot_properties(inst, picks=ic, show=True,
plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args,
psd_args=psd_args, reject=reject)
fig.canvas.mpl_connect('button_press_event', onclick_topo)
plt_show(show)
return fig
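# A hedged usage sketch for ``plot_ica_components`` (the file name is
# hypothetical): passing ``inst`` makes each topomap clickable so component
# properties can be inspected interactively::
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#     ica = mne.preprocessing.ICA(n_components=15).fit(raw)
#     figs = ica.plot_components(inst=raw)  # delegates to this function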
@fill_doc
def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head',
contours=6, sphere=None):
"""Plot topographic maps of specific time-frequency intervals of TFR data.
Parameters
----------
tfr : AverageTFR
The AverageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point available
is used.
fmin : None | float
The first frequency to display. If None the first frequency available
is used.
fmax : None | float
The last frequency to display. If None the last frequency available is
used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the mean for each pair is plotted. If None, then channels are
chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction. If None do
not apply it. If baseline is (a, b) the interval is between "a (s)" and
"b (s)". If a is None the beginning of the data is used and if b is
None then b is set to the end of the interval. If baseline is equal to
(None, None) the whole time interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio' | None
Perform baseline correction by
- subtracting the mean baseline power ('mean')
- dividing by the mean baseline power ('ratio')
- dividing by the mean baseline power and taking the log ('logratio')
- subtracting the mean baseline power followed by dividing by the
mean baseline power ('percent')
- subtracting the mean baseline power and dividing by the standard
deviation of the baseline power ('zscore')
- dividing by the mean baseline power, taking the log, and dividing
by the standard deviation of the baseline power ('zlogratio')
If None no baseline correction is applied.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+'). If True (default), circles will be used.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches (only applies when plotting multiple
topomaps at a time).
cbar_fmt : str
String format for colorbar values.
%(topomap_show_names)s
title : str | None
Plot title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. If colorbar=True, the ticks in colorbar correspond to the
contour levels. Defaults to 6.
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
import matplotlib.pyplot as plt
ch_type = _get_ch_type(tfr, ch_type)
picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(tfr, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
if not show_names:
names = None
data = tfr.data[picks, :, :]
# merging grads before rescaling makes ERDs visible
if merge_channels:
data, names = _merge_ch_data(data, ch_type, names, method='mean')
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[:, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
norm = False if np.min(data) < 0 else True
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
cmap = _setup_cmap(cmap, norm=norm)
axes = plt.subplots(figsize=(size, size))[1] if axes is None else axes
fig = axes.figure
_hide_frame(axes)
locator = None
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
if title is not None:
axes.set_title(title)
fig_wrapper = list()
selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
itmin=itmin, itmax=itmax, ifmin=ifmin,
ifmax=ifmax, cmap=cmap[0], fig=fig_wrapper)
if not isinstance(contours, (list, np.ndarray)):
_, contours = _set_contour_locator(vmin, vmax, contours)
im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
axes=axes, cmap=cmap[0], image_interp='bilinear',
contours=contours, names=names, show_names=show_names,
show=False, onselect=selection_callback,
sensors=sensors, res=res, ch_type=ch_type,
outlines=outlines, sphere=sphere)
if colorbar:
from matplotlib import ticker
unit = _handle_default('units', unit)['misc']
cbar, cax = _add_colorbar(axes, im, cmap, title=unit, format=cbar_fmt)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
cbar.ax.tick_params(labelsize=12)
plt_show(show)
return fig
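# A hedged usage sketch for ``plot_tfr_topomap`` (the AverageTFR instance is
# assumed to come from e.g. ``mne.time_frequency.tfr_morlet``): plotting
# baseline-corrected alpha-band power between 0.1 s and 0.3 s might look
# like::
#
#     fig = plot_tfr_topomap(tfr, tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
#                            baseline=(-0.5, 0.), mode='logratio',
#                            ch_type='grad')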
@fill_doc
def plot_evoked_topomap(evoked, times="auto", ch_type=None,
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, scalings=None,
units=None, res=64, size=1, cbar_fmt='%3.1f',
time_unit='s', time_format=None, proj=False,
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None,
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None, border=_BORDER_DEFAULT,
nrows=1, ncols='auto'):
"""Plot topographic maps of specific time points of evoked data.
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of float | "auto" | "peaks" | "interactive"
The time point(s) to plot. If "auto", the number of ``axes`` determines
        the number of time points. If ``axes`` is also None, at most 10
topographies will be shown with a regular time spacing between the
first and last time instant. If "peaks", finds time points
automatically by checking for local maxima in global field power. If
"interactive", the time can be set interactively at run-time by using a
slider.
%(topomap_ch_type)s
%(topomap_vmin_vmax)s
%(topomap_cmap)s
%(topomap_sensors)s
%(topomap_colorbar)s
%(topomap_scalings)s
%(topomap_units)s
%(topomap_res)s
%(topomap_size)s
%(topomap_cbar_fmt)s
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
time_format : str | None
String format for topomap values. Defaults (None) to "%%01d ms" if
``time_unit='ms'``, "%%0.3f s" if ``time_unit='s'``, and
"%%g" otherwise. Can be an empty string to omit the time label.
%(plot_proj)s
%(show)s
%(topomap_show_names)s
%(title_None)s
%(topomap_mask)s
%(topomap_mask_params)s
%(topomap_outlines)s
%(topomap_contours)s
%(topomap_image_interp)s
%(topomap_average)s
%(topomap_axes)s
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere_auto)s
%(topomap_border)s
nrows : int | 'auto'
The number of rows of topographies to plot. Defaults to 1. If 'auto',
        obtains the number of rows depending on the number of time points to
        plot and the number of columns. Not valid when times == 'interactive'.
.. versionadded:: 0.20
ncols : int | 'auto'
The number of columns of topographies to plot. If 'auto' (default),
        obtains the number of columns depending on the number of time points
        to plot and the number of rows. Not valid when times == 'interactive'.
.. versionadded:: 0.20
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
When existing ``axes`` are provided and ``colorbar=True``, note that the
colorbar scale will only accurately reflect topomaps that are generated in
the same call as the colorbar. Note also that the colorbar will not be
resized automatically when ``axes`` are provided; use matplotlib's
:meth:`axes.set_position() <matplotlib.axes.Axes.set_position>` method or
:doc:`gridspec <matplotlib:tutorials/intermediate/gridspec>` interface to
adjust the colorbar size yourself.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.widgets import Slider
from ..evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(colorbar, bool, 'colorbar')
evoked = evoked.copy() # make a copy, since we'll be picking
ch_type = _get_ch_type(evoked, ch_type)
# time units / formatting
time_unit, _ = _check_time_unit(time_unit, evoked.times)
scaling_time = 1. if time_unit == 's' else 1e3
_validate_type(time_format, (None, str), 'time_format')
if time_format is None:
time_format = '%0.3f s' if time_unit == 's' else '%01d ms'
del time_unit
# mask_params defaults
mask_params = _handle_default('mask_params', mask_params)
mask_params['markersize'] *= size / 2.
mask_params['markeredgewidth'] *= size / 2.
# setup various parameters, and prepare outlines
picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(evoked, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
# check interactive
axes_given = axes is not None
interactive = isinstance(times, str) and times == 'interactive'
if interactive and axes_given:
raise ValueError("User-provided axes not allowed when "
"times='interactive'.")
# units, scalings
key = 'grad' if ch_type.startswith('planar') else ch_type
scaling = _handle_default('scalings', scalings)[key]
unit = _handle_default('units', units)[key]
# ch_names (required for NIRS)
ch_names = names
if not show_names:
names = None
# apply projections before picking. NOTE: the `if proj is True`
# anti-pattern is needed here to exclude proj='interactive'
_check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
if proj is True and not evoked.proj:
evoked.apply_proj()
elif proj == 'reconstruct':
evoked._reconstruct_proj()
data = evoked.data
# remove compensation matrices (safe: only plotting & already made copy)
evoked.info['comps'] = []
evoked = evoked._pick_drop_channels(picks)
# determine which times to plot
if isinstance(axes, plt.Axes):
axes = [axes]
n_peaks = len(axes) - int(colorbar) if axes_given else None
times = _process_times(evoked, times, n_peaks)
n_times = len(times)
space = 1 / (2. * evoked.info['sfreq'])
if (max(times) > max(evoked.times) + space or
min(times) < min(evoked.times) - space):
raise ValueError(f'Times should be between {evoked.times[0]:0.3} and '
f'{evoked.times[-1]:0.3}.')
# create axes
want_axes = n_times + int(colorbar)
if interactive:
height_ratios = [5, 1]
nrows = 2
ncols = want_axes
width = size * ncols
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
fig = figure_nobar(figsize=(width * 1.5, height * 1.5))
g_kwargs = {'left': 0.2, 'right': 0.8, 'bottom': 0.05, 'top': 0.9}
gs = GridSpec(nrows, ncols, height_ratios=height_ratios, **g_kwargs)
axes = []
for ax_idx in range(n_times):
axes.append(plt.subplot(gs[0, ax_idx]))
elif axes is None:
fig, axes, ncols, nrows = _prepare_trellis(
n_times, ncols=ncols, nrows=nrows, title=title,
colorbar=colorbar, size=size)
else:
nrows, ncols = None, None # Deactivate ncols when axes were passed
fig = axes[0].get_figure()
# check: enough space for colorbar?
if len(axes) != want_axes:
cbar_err = ' plus one for the colorbar' if colorbar else ''
raise RuntimeError(f'You must provide {want_axes} axes (one for '
f'each time{cbar_err}), got {len(axes)}.')
# figure margins
side_margin = plt.rcParams['figure.subplot.wspace'] / (2 * want_axes)
top_margin = max((0.05 if title is None else 0.25), .2 / size)
fig.subplots_adjust(left=side_margin, right=1 - side_margin, bottom=0,
top=1 - top_margin)
# find first index that's >= (to rounding error) to each time point
time_idx = [np.where(_time_mask(evoked.times, tmin=t, tmax=None,
sfreq=evoked.info['sfreq']))[0][0]
for t in times]
# do averaging if requested
avg_err = '"average" must be `None` or a positive number of seconds'
if average is None:
data = data[np.ix_(picks, time_idx)]
elif not _is_numeric(average):
raise TypeError(f'{avg_err}; got type {type(average)}.')
elif average <= 0:
raise ValueError(f'{avg_err}; got {average}.')
else:
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = average / 2.
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
iter_times - ave_time,
iter_times + ave_time)):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
# apply scalings and merge channels
data *= scaling
if merge_channels:
data, ch_names = _merge_ch_data(data, ch_type, ch_names)
if ch_type in _fnirs_types:
merge_channels = False
# apply mask if requested
if mask is not None:
if ch_type == 'grad':
mask_ = (mask[np.ix_(picks[::2], time_idx)] |
mask[np.ix_(picks[1::2], time_idx)])
else: # mag, eeg, planar1, planar2
mask_ = mask[np.ix_(picks, time_idx)]
# set up colormap
vlims = [_setup_vmin_vmax(data[:, i], vmin, vmax, norm=merge_channels)
for i in range(n_times)]
vmin = np.min(vlims)
vmax = np.max(vlims)
cmap = _setup_cmap(cmap, n_axes=n_times, norm=vmin >= 0)
# set up contours
if not isinstance(contours, (list, np.ndarray)):
_, contours = _set_contour_locator(vmin, vmax, contours)
# prepare for main loop over times
kwargs = dict(vmin=vmin, vmax=vmax, sensors=sensors, res=res, names=names,
show_names=show_names, cmap=cmap[0], mask_params=mask_params,
outlines=outlines, contours=contours,
image_interp=image_interp, show=False,
extrapolate=extrapolate, sphere=sphere, border=border,
ch_type=ch_type)
images, contours_ = [], []
# loop over times
for idx, time in enumerate(times):
adjust_for_cbar = colorbar and ncols is not None and idx >= ncols - 1
ax_idx = idx + 1 if adjust_for_cbar else idx
tp, cn, interp = _plot_topomap(
data[:, idx], pos, axes=axes[ax_idx],
mask=mask_[:, idx] if mask is not None else None, **kwargs)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format != '':
axes[ax_idx].set_title(time_format % (time * scaling_time))
if interactive:
axes.append(plt.subplot(gs[1, :-1]))
slider = Slider(axes[-1], 'Time', evoked.times[0], evoked.times[-1],
times[0], valfmt='%1.2fs')
slider.vline.remove() # remove initial point indicator
func = _merge_ch_data if merge_channels else lambda x: x
changed_callback = partial(_slider_changed, ax=axes[0],
data=evoked.data, times=evoked.times,
pos=pos, scaling=scaling, func=func,
time_format=time_format,
scaling_time=scaling_time, kwargs=kwargs)
slider.on_changed(changed_callback)
ts = np.tile(evoked.times, len(evoked.data)).reshape(evoked.data.shape)
axes[-1].plot(ts, evoked.data, color='k')
axes[-1].slider = slider
if title is not None:
plt.suptitle(title, verticalalignment='top', size='x-large')
if colorbar:
if interactive:
cax = plt.subplot(gs[0, -1])
_resize_cbar(cax, ncols, size)
elif nrows is None or ncols is None:
# axes were given by the user, so don't resize the colorbar
cax = axes[-1]
else: # use the entire last column
cax = axes[ncols - 1]
_resize_cbar(cax, ncols, size)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
if cn is not None:
cbar.set_ticks(contours)
cbar.ax.tick_params(labelsize=7)
if cmap[1]:
for im in images:
im.axes.CB = DraggableColorbar(cbar, im)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked, fig=fig, projs=evoked.info['projs'], picks=picks,
images=images, contours_=contours_, pos=pos, time_idx=time_idx,
res=res, plot_update_proj_callback=_plot_update_evoked_topomap,
merge_channels=merge_channels, scale=scaling, axes=axes,
contours=contours, interp=interp, extrapolate=extrapolate)
_draw_proj_checkbox(None, params)
plt_show(show, block=False)
if axes_given:
fig.canvas.draw()
return fig
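# A minimal usage sketch for the routine above, assuming it backs the public
# ``Evoked.plot_topomap`` API and that an ``Evoked`` object is at hand; the
# file name below is a placeholder, not something shipped with this module.
def _example_evoked_topomap_usage():
    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # placeholder file
    # magnetometer maps at three latencies, averaging +/-25 ms around each
    # requested time point, with a colorbar axes appended on the right
    return evoked.plot_topomap(times=[0.1, 0.2, 0.3], ch_type='mag',
                               average=0.05, colorbar=True, show=False)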
def _resize_cbar(cax, n_fig_axes, size=1):
"""Resize colorbar."""
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (0.7 + 0.1 / size) / n_fig_axes
cpos.x1 = cpos.x0 + 0.1 / n_fig_axes
cpos.y0 = 0.2
cpos.y1 = 0.7
cax.set_position(cpos)
def _slider_changed(val, ax, data, times, pos, scaling, func, time_format,
scaling_time, kwargs):
"""Handle selection in interactive topomap."""
idx = np.argmin(np.abs(times - val))
data = func(data[:, idx]).ravel() * scaling
ax.clear()
im, _ = plot_topomap(data, pos, axes=ax, **kwargs)
if hasattr(ax, 'CB'):
ax.CB.mappable = im
_resize_cbar(ax.CB.cbar.ax, 2)
if time_format is not None:
ax.set_title(time_format % (val * scaling_time))
def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None, vmin=None,
vmax=None, cmap=None, outlines='head',
colorbar=False, cbar_fmt='%3.3f',
sphere=None, ch_type='eeg'):
"""Plot topomap multi cbar."""
_hide_frame(ax)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
    # this definition of "norm" allows a non-diverging colormap for cases where
    # vmin & vmax are both negative (e.g., when they are power in dB)
signs = np.sign([vmin, vmax])
norm = len(set(signs)) == 1 or np.any(signs == 0)
cmap = _setup_cmap(cmap, norm=norm)
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axes=ax,
cmap=cmap[0], image_interp='bilinear', contours=0,
outlines=outlines, show=False, sphere=sphere,
ch_type=ch_type)
if colorbar:
cbar, cax = _add_colorbar(ax, im, cmap, pad=0.25, title=None,
size="10%", format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_ylabel(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
@verbose
def plot_epochs_psd_topomap(epochs, bands=None,
tmin=None, tmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None,
cmap=None, agg_fun=None, dB=False, n_jobs=1,
normalize=False, cbar_fmt='auto',
outlines='head', axes=None, show=True,
sphere=None, vlim=(None, None), verbose=None):
"""Plot the topomap of the power spectral density across epochs.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(psd_topo_bands)s
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the mean for each pair is plotted. If None, then first
available channel type from order given above is used. Defaults to
None.
%(psd_topo_cmap)s
%(psd_topo_agg_fun)s
%(psd_topo_dB)s
%(n_jobs)s
%(psd_topo_normalize)s
%(psd_topo_cbar_fmt)s
%(topomap_outlines)s
%(psd_topo_axes)s
show : bool
Show figure if True.
%(topomap_sphere_auto)s
%(psd_topo_vlim_joint)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure distributing one image per channel across sensor topography.
"""
ch_type = _get_ch_type(epochs, ch_type)
units = _handle_default('units', None)
unit = units[ch_type]
picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(epochs, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
psds, freqs = psd_multitaper(epochs, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, picks=picks,
proj=proj, n_jobs=n_jobs)
psds = np.mean(psds, axis=0)
if merge_channels:
psds, names = _merge_ch_data(psds, ch_type, names, method='mean')
return plot_psds_topomap(
psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun,
bands=bands, cmap=cmap, dB=dB, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, axes=axes, show=show,
sphere=sphere, vlim=vlim, unit=unit, ch_type=ch_type)
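# A short usage sketch for ``plot_epochs_psd_topomap``, assuming an ``Epochs``
# object is available; ``epochs`` is a placeholder argument, and the band
# edges below are just an example of the (fmin, fmax, title) format.
def _example_psd_topomap_usage(epochs):
    bands = [(8, 12, 'Alpha (8-12 Hz)'), (13, 30, 'Beta (13-30 Hz)')]
    # multitaper PSD averaged over epochs, one dB-scaled map per band
    return plot_epochs_psd_topomap(epochs, bands=bands, ch_type='eeg',
                                   dB=True, normalize=False, show=False)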
@fill_doc
def plot_psds_topomap(
psds, freqs, pos, agg_fun=None, bands=None,
cmap=None, dB=True, normalize=False, cbar_fmt='%0.3f', outlines='head',
axes=None, show=True, sphere=None, vlim=(None, None), unit=None,
ch_type='eeg'):
"""Plot spatial maps of PSDs.
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
Power spectral densities
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
%(psd_topo_agg_fun)s
%(psd_topo_bands)s
%(psd_topo_cmap)s
%(psd_topo_dB)s
%(psd_topo_normalize)s
%(psd_topo_cbar_fmt)s
%(topomap_outlines)s
%(psd_topo_axes)s
show : bool
Show figure if True.
%(topomap_sphere)s
%(psd_topo_vlim_joint)s
unit : str | None
Measurement unit to be displayed with the colorbar. If ``None``, no
unit is displayed (only "power" or "dB" as appropriate).
%(topomap_ch_type)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure with a topomap subplot for each band.
"""
import matplotlib.pyplot as plt
sphere = _check_sphere(sphere)
if cbar_fmt == 'auto':
cbar_fmt = '%0.1f' if dB else '%0.3f'
if bands is None:
bands = [(0, 4, 'Delta (0-4 Hz)'), (4, 8, 'Theta (4-8 Hz)'),
(8, 12, 'Alpha (8-12 Hz)'), (12, 30, 'Beta (12-30 Hz)'),
(30, 45, 'Gamma (30-45 Hz)')]
else: # upconvert single freqs to band upper/lower edges as needed
bin_spacing = np.diff(freqs)[0]
bin_edges = np.array([0, bin_spacing]) - bin_spacing / 2
bands = [tuple(bin_edges + freqs[np.argmin(np.abs(freqs - band[0]))]) +
(band[1],) if len(band) == 2 else band for band in bands]
if agg_fun is None:
agg_fun = np.sum if normalize else np.mean
if normalize:
psds /= psds.sum(axis=-1, keepdims=True)
assert np.allclose(psds.sum(axis=-1), 1.)
n_axes = len(bands)
if axes is not None:
_validate_if_list_of_axes(axes, n_axes)
fig = axes[0].figure
else:
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
# handle vmin/vmax
if vlim == 'joint':
_freq_masks = [(fmin < freqs) & (freqs < fmax)
for (fmin, fmax, _) in bands]
_datas = [agg_fun(psds[:, _freq_mask], axis=1)
for _freq_mask in _freq_masks]
_datas = [10 * np.log10(_d) if (dB and not normalize) else _d
for _d in _datas]
vmin = np.array(_datas).min()
vmax = np.array(_datas).max()
else:
vmin, vmax = vlim
if unit is None:
unit = 'dB' if dB and not normalize else 'power'
else:
if '/' in unit:
unit = '(%s)' % unit
unit += '²/Hz'
if dB and not normalize:
unit += ' (dB)'
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError('No frequencies in band "%s" (%s, %s)'
% (title, fmin, fmax))
data = agg_fun(psds[:, freq_mask], axis=1)
if dB and not normalize:
data = 10 * np.log10(data)
_plot_topomap_multi_cbar(data, pos, ax, title=title, vmin=vmin,
vmax=vmax, cmap=cmap, outlines=outlines,
colorbar=True, unit=unit, cbar_fmt=cbar_fmt,
sphere=sphere, ch_type=ch_type)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
@fill_doc
def plot_layout(layout, picks=None, show_axes=False, show=True):
"""Plot the sensor positions.
Parameters
----------
layout : None | Layout
Layout instance specifying sensor positions.
%(picks_nostr)s
show_axes : bool
Show layout axes if True. Defaults to False.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(max(plt.rcParams['figure.figsize']),) * 2)
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None,
hspace=None)
ax.set(xticks=[], yticks=[], aspect='equal')
outlines = dict(border=([0, 1, 1, 0, 0], [0, 0, 1, 1, 0]))
_draw_outlines(ax, outlines)
picks = _picks_to_idx(len(layout.names), picks)
pos = layout.pos[picks]
names = np.array(layout.names)[picks]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
center_pos = np.array((p[0] + p[2] / 2., p[1] + p[3] / 2.))
ax.annotate(ch_id, xy=center_pos, horizontalalignment='center',
verticalalignment='center', size='x-small')
if show_axes:
x1, x2, y1, y2 = p[0], p[0] + p[2], p[1], p[1] + p[3]
ax.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1], color='k')
ax.axis('off')
tight_layout(fig=fig, pad=0, w_pad=0, h_pad=0)
plt_show(show)
return fig
def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
cmap, fig, layout=None):
"""Handle drawing average tfr over channels called from topomap."""
import matplotlib.pyplot as plt
from matplotlib.collections import PathCollection
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = ((pos[:, 0] < xmax) & (pos[:, 0] > xmin) &
(pos[:, 1] < ymax) & (pos[:, 1] > ymin))
colors = ['r' if ii else 'k' for ii in indices]
indices = np.where(indices)[0]
for collection in ax.collections:
if isinstance(collection, PathCollection): # this is our "scatter"
collection.set_color(colors)
ax.figure.canvas.draw()
if len(indices) == 0:
return
data = tfr.data
if ch_type == 'mag':
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == 'grad':
grads = _pair_grad_sensors(tfr.info, layout=layout,
topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == 'eeg':
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info('Averaging TFR over channels ' + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = len(tfr.times) - 1 if itmax is None else min(itmax,
len(tfr.times) - 1)
ifmax = len(tfr.freqs) - 1 if ifmax is None else min(ifmax,
len(tfr.freqs) - 1)
if itmin is None:
itmin = 0
if ifmin is None:
ifmin = 0
extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
tfr.freqs[ifmax])
title = 'Average over %d %s channels.' % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
cmap=cmap)
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
def _prepare_topomap(pos, ax, check_nonzero=True):
"""Prepare the topomap axis and check positions.
Hides axis frame and check that position information is present.
"""
_hide_frame(ax)
if check_nonzero and not pos.any():
raise RuntimeError('No position information found, cannot compute '
'geometries for topomap.')
def _hide_frame(ax):
"""Hide axis frame for topomaps."""
ax.get_yticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
def _check_extrapolate(extrapolate, ch_type):
_check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto'))
if extrapolate == 'auto':
extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head'
return extrapolate
@verbose
def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere, ch_type,
extrapolate, verbose):
"""Initialize animated topomap."""
logger.info('Initializing animation...')
data = params['data']
items = list()
if params['butterfly']:
all_times = params['all_times']
for idx in range(len(data)):
ax_line.plot(all_times, data[idx], color='k', lw=1)
vmin, vmax = _setup_vmin_vmax(data, None, None)
ax_line.set(yticks=np.around(np.linspace(vmin, vmax, 5), -1),
xlim=all_times[[0, -1]])
params['line'] = ax_line.axvline(all_times[0], color='r')
items.append(params['line'])
if merge_channels:
from mne.channels.layout import _merge_ch_data
data, _ = _merge_ch_data(data, 'grad', [])
norm = True if np.min(data) > 0 else False
cmap = 'Reds' if norm else 'RdBu_r'
vmin, vmax = _setup_vmin_vmax(data, None, None, norm)
outlines = _make_head_outlines(sphere, params['pos'], 'head',
params['clip_origin'])
_hide_frame(ax)
extent, Xi, Yi, interp = _setup_interp(
params['pos'], 64, extrapolate, sphere, outlines, 0)
patch_ = _get_patch(outlines, extrapolate, interp, ax)
params['Zis'] = list()
for frame in params['frames']:
params['Zis'].append(interp.set_values(data[:, frame])(Xi, Yi))
Zi = params['Zis'][0]
zi_min = np.nanmin(params['Zis'])
zi_max = np.nanmax(params['Zis'])
cont_lims = np.linspace(zi_min, zi_max, 7, endpoint=False)[1:]
params.update({'vmin': vmin, 'vmax': vmax, 'Xi': Xi, 'Yi': Yi, 'Zi': Zi,
'extent': extent, 'cmap': cmap, 'cont_lims': cont_lims})
# plot map and contour
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent,
interpolation='bilinear')
ax.autoscale(enable=True, tight=True)
ax.figure.colorbar(im, cax=ax_cbar)
cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
im.set_clip_path(patch_)
text = ax.text(0.55, 0.95, '', transform=ax.transAxes, va='center',
ha='right')
params['text'] = text
items.append(im)
items.append(text)
for col in cont.collections:
col.set_clip_path(patch_)
outlines_ = _draw_outlines(ax, outlines)
params.update({'patch': patch_, 'outlines': outlines_})
ax.figure.tight_layout()
return tuple(items) + tuple(cont.collections)
def _animate(frame, ax, ax_line, params):
"""Update animated topomap."""
if params['pause']:
frame = params['frame']
time_idx = params['frames'][frame]
if params['time_unit'] == 'ms':
title = '%6.0f ms' % (params['times'][frame] * 1e3,)
else:
title = '%6.3f s' % (params['times'][frame],)
if params['blit']:
text = params['text']
else:
ax.cla() # Clear old contours.
text = ax.text(0.45, 1.15, '', transform=ax.transAxes)
for k, (x, y) in params['outlines'].items():
if 'mask' in k:
continue
ax.plot(x, y, color='k', linewidth=1, clip_on=False)
_hide_frame(ax)
text.set_text(title)
vmin = params['vmin']
vmax = params['vmax']
Xi = params['Xi']
Yi = params['Yi']
Zi = params['Zis'][frame]
extent = params['extent']
cmap = params['cmap']
patch = params['patch']
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent, interpolation='bilinear')
cont_lims = params['cont_lims']
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
cont = ax.contour(
Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
im.set_clip_path(patch)
for col in cont.collections:
col.set_clip_path(patch)
items = [im, text]
if params['butterfly']:
all_times = params['all_times']
line = params['line']
line.remove()
ylim = ax_line.get_ylim()
params['line'] = ax_line.axvline(all_times[time_idx], color='r')
ax_line.set_ylim(ylim)
items.append(params['line'])
params['frame'] = frame
return tuple(items) + tuple(cont.collections)
def _pause_anim(event, params):
"""Pause or continue the animation on mouse click."""
params['pause'] = not params['pause']
def _key_press(event, params):
"""Handle key presses for the animation."""
if event.key == 'left':
params['pause'] = True
params['frame'] = max(params['frame'] - 1, 0)
elif event.key == 'right':
params['pause'] = True
params['frame'] = min(params['frame'] + 1, len(params['frames']) - 1)
def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit,
show, time_unit, sphere, extrapolate, *, verbose=None):
"""Make animation of evoked data as topomap timeseries.
See mne.evoked.Evoked.animate_topomap.
"""
from matplotlib import pyplot as plt, animation
if ch_type is None:
ch_type = _picks_by_type(evoked.info)[0][0]
if ch_type not in ('mag', 'grad', 'eeg',
'hbo', 'hbr', 'fnirs_od', 'fnirs_cw_amplitude'):
raise ValueError("Channel type not supported. Supported channel "
"types include 'mag', 'grad', 'eeg'. 'hbo', 'hbr', "
"'fnirs_cw_amplitude', and 'fnirs_od'.")
time_unit, _ = _check_time_unit(time_unit, evoked.times)
if times is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
times = np.array(times)
if times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
if max(times) > evoked.times[-1] or min(times) < evoked.times[0]:
raise ValueError('All times must be inside the evoked time series.')
frames = [np.abs(evoked.times - time).argmin() for time in times]
picks, pos, merge_channels, _, ch_type, sphere, clip_origin = \
_prepare_topomap_plot(evoked, ch_type, sphere=sphere)
data = evoked.data[picks, :]
data *= _handle_default('scalings')[ch_type]
fig = plt.figure(figsize=(6, 5))
shape = (8, 12)
colspan = shape[1] - 1
rowspan = shape[0] - bool(butterfly)
ax = plt.subplot2grid(shape, (0, 0), rowspan=rowspan, colspan=colspan)
if butterfly:
ax_line = plt.subplot2grid(shape, (rowspan, 0), colspan=colspan)
else:
ax_line = None
if isinstance(frames, Integral):
frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int)
ax_cbar = plt.subplot2grid(shape, (0, colspan), rowspan=rowspan)
ax_cbar.set_title(_handle_default('units')[ch_type], fontsize=10)
extrapolate = _check_extrapolate(extrapolate, ch_type)
params = dict(data=data, pos=pos, all_times=evoked.times, frame=0,
frames=frames, butterfly=butterfly, blit=blit,
pause=False, times=times, time_unit=time_unit,
clip_origin=clip_origin)
init_func = partial(_init_anim, ax=ax, ax_cbar=ax_cbar, ax_line=ax_line,
params=params, merge_channels=merge_channels,
sphere=sphere, ch_type=ch_type,
extrapolate=extrapolate, verbose=verbose)
animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params)
pause_func = partial(_pause_anim, params=params)
fig.canvas.mpl_connect('button_press_event', pause_func)
key_press_func = partial(_key_press, params=params)
fig.canvas.mpl_connect('key_press_event', key_press_func)
if frame_rate is None:
frame_rate = evoked.info['sfreq'] / 10.
interval = 1000 / frame_rate # interval is in ms
anim = animation.FuncAnimation(fig, animate_func, init_func=init_func,
frames=len(frames), interval=interval,
blit=blit)
fig.mne_animation = anim # to make sure anim is not garbage collected
plt_show(show, block=False)
if 'line' in params:
# Finally remove the vertical line so it does not appear in saved fig.
params['line'].remove()
return fig, anim
def _set_contour_locator(vmin, vmax, contours):
"""Set correct contour levels."""
locator = None
if isinstance(contours, Integral) and contours > 0:
from matplotlib import ticker
# nbins = ticks - 1, since 2 of the ticks are vmin and vmax, the
# correct number of bins is equal to contours + 1.
locator = ticker.MaxNLocator(nbins=contours + 1)
contours = locator.tick_values(vmin, vmax)
return locator, contours
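# A small sketch of how the helper above is used: for an integer ``contours``
# it returns a MaxNLocator together with "nice" tick values spanning roughly
# [vmin, vmax]; the exact levels depend on matplotlib's tick selection, so no
# particular values are assumed here.
def _example_contour_locator_usage():
    locator, levels = _set_contour_locator(vmin=-2e-13, vmax=2e-13, contours=6)
    return locator, levels  # ``levels`` is an array of contour levels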
def _plot_corrmap(data, subjs, indices, ch_type, ica, label, show, outlines,
cmap, contours, template=False, sphere=None):
"""Customize ica.plot_components for corrmap."""
if not template:
title = 'Detected components'
if label is not None:
title += ' of type ' + label
else:
title = "Supplied template"
picks = list(range(len(data)))
p = 20
if len(picks) > p: # plot components by sets of 20
n_components = len(picks)
figs = [_plot_corrmap(data[k:k + p], subjs[k:k + p],
indices[k:k + p], ch_type, ica, label, show,
outlines=outlines, cmap=cmap, contours=contours)
for k in range(0, n_components, p)]
return figs
elif np.isscalar(picks):
picks = [picks]
data_picks, pos, merge_channels, names, _, sphere, clip_origin = \
_prepare_topomap_plot(ica, ch_type, sphere=sphere)
outlines = _make_head_outlines(sphere, pos, outlines, clip_origin)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes, _, _ = _prepare_trellis(len(picks), ncols=5)
fig.suptitle(title)
for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
if template:
ttl = 'Subj. {}, {}'.format(subject, ica._ica_names[idx])
ax.set_title(ttl, fontsize=12)
else:
ax.set_title('Subj. {}'.format(subject))
if merge_channels:
data_, _ = _merge_ch_data(data_, ch_type, [])
vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
res=64, axes=ax, cmap=cmap, outlines=outlines,
contours=contours, show=False, image_interp='bilinear')[0]
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.8)
fig.canvas.draw()
plt_show(show)
return fig
def _trigradient(x, y, z):
"""Take gradients of z on a mesh."""
from matplotlib.tri import CubicTriInterpolator, Triangulation
with warnings.catch_warnings(): # catch matplotlib warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
tri = Triangulation(x, y)
tci = CubicTriInterpolator(tri, z)
dx, dy = tci.gradient(tri.x, tri.y)
return dx, dy
@fill_doc
def plot_arrowmap(data, info_from, info_to=None, scale=3e-10, vmin=None,
vmax=None, cmap=None, sensors=True, res=64, axes=None,
names=None, show_names=False, mask=None, mask_params=None,
outlines='head', contours=6, image_interp='bilinear',
show=True, onselect=None, extrapolate=_EXTRAPOLATE_DEFAULT,
sphere=None):
"""Plot arrow map.
    Compute arrowmaps based upon the Hosaka-Cohen transformation
    :footcite:`CohenHosaka1976`. The arrows represent an estimate of the
    current flow underneath the MEG sensors; they are a poor man's MNE.
    Since planar gradiometers take gradients along latitude and longitude,
    they need to be projected onto the flattened manifold spanned by the
    magnetometers or radial gradiometers before taking the gradients in the
    2D Cartesian coordinate system for visualization on the 2D topoplot. You
    can use the ``info_from`` and ``info_to`` parameters to interpolate from
    gradiometer data to magnetometer data.
Parameters
----------
data : array, shape (n_channels,)
The data values to plot.
info_from : instance of Info
The measurement info from data to interpolate from.
info_to : instance of Info | None
The measurement info to interpolate to. If None, it is assumed
to be the same as info_from.
scale : float, default 3e-10
To scale the arrows.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True (default), circles
will be used.
res : int
The resolution of the topomap image (n pixels along each side).
axes : instance of Axes | None
The axes to plot to. If None, a new figure will be created.
names : list | None
List of channel names. If None, channel names are not plotted.
%(topomap_show_names)s
If ``True``, a list of names must be provided (see ``names`` keyword).
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to ``True`` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
If an array, the values represent the levels for the contours. The
values are in µV for EEG, fT for magnetometers and fT/m for
gradiometers. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
%(topomap_extrapolate)s
.. versionadded:: 0.18
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The Figure of the plot.
Notes
-----
.. versionadded:: 0.17
References
----------
.. footbibliography::
"""
from matplotlib import pyplot as plt
from ..forward import _map_meg_or_eeg_channels
sphere = _check_sphere(sphere, info_from)
ch_type = _picks_by_type(info_from)
if len(ch_type) > 1:
        raise ValueError('Multiple channel types are not supported. '
                         'All channels must either be of type \'grad\' '
                         'or \'mag\'.')
else:
ch_type = ch_type[0][0]
if ch_type not in ('mag', 'grad'):
raise ValueError("Channel type '%s' not supported. Supported channel "
"types are 'mag' and 'grad'." % ch_type)
if info_to is None and ch_type == 'mag':
info_to = info_from
else:
ch_type = _picks_by_type(info_to)
if len(ch_type) > 1:
raise ValueError("Multiple channel types are not supported.")
else:
ch_type = ch_type[0][0]
if ch_type != 'mag':
raise ValueError("only 'mag' channel type is supported. "
"Got %s" % ch_type)
if info_to is not info_from:
info_to = pick_info(info_to, pick_types(info_to, meg=True))
info_from = pick_info(info_from, pick_types(info_from, meg=True))
# XXX should probably support the "origin" argument
mapping = _map_meg_or_eeg_channels(
info_from, info_to, origin=(0., 0., 0.04), mode='accurate')
data = np.dot(mapping, data)
_, pos, _, _, _, sphere, clip_origin = \
_prepare_topomap_plot(info_to, 'mag', sphere=sphere)
outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
if axes is None:
fig, axes = plt.subplots()
else:
fig = axes.figure
plot_topomap(data, pos, axes=axes, vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, res=res, names=names, show_names=show_names,
mask=mask, mask_params=mask_params, outlines=outlines,
contours=contours, image_interp=image_interp, show=False,
onselect=onselect, extrapolate=extrapolate, sphere=sphere,
ch_type=ch_type)
x, y = tuple(pos.T)
dx, dy = _trigradient(x, y, data)
dxx = dy.data
dyy = -dx.data
axes.quiver(x, y, dxx, dyy, scale=scale, color='k', lw=1, clip_on=False)
axes.figure.canvas.draw_idle()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
plt_show(show)
return fig
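# A minimal usage sketch for ``plot_arrowmap``, assuming ``evoked`` is an
# ``Evoked`` instance containing magnetometers; the 0.1 s latency picked
# below is arbitrary.
def _example_arrowmap_usage(evoked):
    evoked_mag = evoked.copy().pick_types(meg='mag')
    t_idx = np.argmin(np.abs(evoked_mag.times - 0.1))
    data = evoked_mag.data[:, t_idx]
    # with magnetometer data, info_to defaults to info_from, so no
    # interpolation between channel types is needed here
    return plot_arrowmap(data, evoked_mag.info, scale=3e-10, show=False)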
|
bsd-3-clause
|
Ohjeah/sparsereg
|
sparsereg/model/bayes.py
|
1
|
4327
|
import warnings
import numpy as np
from sklearn.base import RegressorMixin
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.base import LinearModel
from sklearn.utils.validation import check_X_y
from sparsereg.model.base import PrintMixin
eps = np.finfo(np.float64).eps
def scale_sigma(est, X_offset, X_scale):
if est.fit_intercept:
std_intercept = np.sqrt(np.abs(X_offset @ np.diag(est.sigma_).T))
else:
std_intercept = 0
sigma = np.diag(est.sigma_) / (X_scale + eps)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
std_coef = np.sqrt(sigma)
return std_intercept, std_coef
class JMAP(LinearModel, RegressorMixin, PrintMixin):
def __init__(
self,
ae0=1e-6,
be0=1e-6,
af0=1e-6,
bf0=1e-6,
max_iter=300,
tol=1e-3,
normalize=False,
fit_intercept=True,
copy_X=True,
):
self.max_iter = max_iter
self.normalize = normalize
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.tol = tol
self.ae0 = ae0
self.be0 = be0
self.af0 = af0
self.bf0 = bf0
warnings.warn(
f"Consider using sklearn.linear_model.BayesianRidge instead of {self.__class__.__name__}."
)
def fit(self, x, y):
x, y = check_X_y(x, y, accept_sparse=[], y_numeric=True, multi_output=False) # boilerplate
x, y, X_offset, y_offset, X_scale = self._preprocess_data(
x, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=self.copy_X
)
fh, vf, ve, sigma = jmap(
y, x, self.ae0, self.be0, self.af0, self.bf0, max_iter=self.max_iter, tol=self.tol
)
self.X_offset_ = X_offset
self.X_scale_ = X_scale
self.sigma_ = sigma
self.ve_ = ve
self.vf_ = vf
self.coef_ = fh
self.alpha_ = 1.0 / np.mean(ve)
self.lambda_ = 1.0 / np.mean(vf)
self.std_intercept_, self.std_coef_ = scale_sigma(self, X_offset, X_scale)
self._set_intercept(X_offset, y_offset, X_scale)
return self
def predict(self, X, return_std=False):
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = ((X @ self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
return y_mean, y_std
def _converged(fhs, tol=0.1):
if len(fhs) < 2:
return False
rtol = np.sum(np.abs((fhs[-1] - fhs[-2]) / fhs[-1]))
return rtol <= tol
def jmap(g, H, ae0, be0, af0, bf0, max_iter=1000, tol=1e-4, rcond=None, observer=None):
"""Maximum a posteriori estimator for g = H @ f + e
p(g | f) = normal(H f, ve I)
    p(ve) = inverse_gamma(ae0, be0)
    p(f | vf) = normal(0, vf I)
    p(vf) = inverse_gamma(af0, bf0)
JMAP: maximizes p(f,ve,vf|g) = p(g | f) p(f | vf) p(ve) p(vf) / p(g)
with respect to f, ve and vf
Original Author: Ali Mohammad-Djafari, April 2015
    Args:
        g: Observation vector of shape (n_samples,).
        H: Design matrix of shape (n_samples, n_features).
        ae0: Shape hyperparameter of the noise-variance prior.
        be0: Scale hyperparameter of the noise-variance prior.
        af0: Shape hyperparameter of the coefficient-variance prior.
        bf0: Scale hyperparameter of the coefficient-variance prior.
        max_iter: Maximum number of fixed-point iterations.
        tol: Relative tolerance used to detect convergence of the estimate.
        rcond: Cut-off ratio passed to ``np.linalg.lstsq``.
        observer: Optional callable ``observer(fh, vf, ve)`` invoked after
            each iteration.
    Returns:
        Tuple ``(fh, vf, ve, sigma)`` with the estimated coefficients, the
        per-coefficient variances, the per-sample noise variances and the
        posterior covariance of the coefficients.
    """
    n_samples, n_features = H.shape
HtH = H.T @ H
Htg = H.T @ g
ve0 = be0 / (ae0 - 1)
vf0 = bf0 / (af0 - 1)
lambda_ = ve0 / vf0
    fh, *_ = np.linalg.lstsq(HtH + lambda_ * np.eye(n_features, n_features), Htg, rcond=rcond)
fhs = [fh]
for _ in range(max_iter):
dg = g - H @ fh
ae = ae0 + 1.5
be = be0 + 0.5 * dg ** 2
ve = be / ae + eps
iVe = np.diag(1 / ve)
af = af0 + 1.5
bf = bf0 + 0.5 * fh ** 2
vf = bf / af + eps
iVf = np.diag(1.0 / vf)
HR = H.T @ iVe @ H + iVf
fh, *_ = np.linalg.lstsq(HR, H.T @ iVe @ g, rcond=rcond)
fhs.append(fh)
if observer is not None:
observer(fh, vf, ve)
if _converged(fhs, tol=tol):
break
else:
warnings.warn(f"jmap did not converge after {max_iter} iterations.", ConvergenceWarning)
sigma = np.linalg.inv(HR)
return fh, vf, ve, sigma
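# A small self-contained sketch of fitting the JMAP estimator on synthetic
# data with a sparse ground truth; the problem size and noise level below are
# arbitrary choices for illustration.
def _example_jmap_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    coef = np.zeros(10)
    coef[:3] = [1.0, -2.0, 0.5]          # only three active features
    y = X @ coef + 0.1 * rng.randn(100)  # noisy linear observations
    est = JMAP(max_iter=300, tol=1e-3).fit(X, y)
    y_mean, y_std = est.predict(X, return_std=True)
    return est.coef_, y_mean, y_std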
|
mit
|
Moriadry/tensorflow
|
tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py
|
130
|
9577
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
      frac_true: Number in (0, 1). Expected fraction of resultant labels that
        will be True. This is only the expected fraction; the realized
        fraction may be higher or lower.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]  # use the default range when not given
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
    frac_true: Number in (0, 1). Expected fraction of resultant labels that
      will be True. This is only the expected fraction; the realized fraction
      may be higher or lower.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
  # We prove here why the method (below) for computing AUC works. Of course we
  # also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
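# A hedged numerical sanity check of the construction above: the empirical
# P[T > F], estimated over all (true, false) score pairs, should come out
# close to desired_auc for a reasonably large number of records.
def _example_synthetic_auc_check():
  rng = np.random.RandomState(0)
  labels, scores = synthetic_data(desired_auc=0.8, score_range=[0, 1.],
                                  num_records=2000, rng=rng, frac_true=0.5)
  true_scores = scores[labels]
  false_scores = scores[~labels]
  # fraction of (true, false) pairs where the true score wins ~ AUC
  empirical_auc = (true_scores[:, None] > false_scores[None, :]).mean()
  return empirical_auc  # expected to land near 0.8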
if __name__ == '__main__':
test.main()
|
apache-2.0
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/analysis_v2/fluxing_analysis.py
|
1
|
84501
|
import lmfit
from uncertainties import ufloat
from pycqed.analysis import measurement_analysis as ma
from collections import OrderedDict
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pycqed.analysis_v2.base_analysis as ba
import numpy as np
from scipy.spatial import ConvexHull
from pycqed.analysis.tools.plotting import (
set_xlabel,
set_ylabel,
plot_fit,
hsluv_anglemap45,
SI_prefix_and_scale_factor,
)
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis import measurement_analysis as ma_old
from pycqed.analysis.analysis_toolbox import color_plot
import matplotlib.pyplot as plt
import matplotlib.colors as col
from pycqed.analysis.fitting_models import (
CosFunc,
Cos_guess,
avoided_crossing_freq_shift,
ChevronInvertedFunc,
ChevronFunc,
ChevronGuess,
)
import pycqed.analysis_v2.simple_analysis as sa
import scipy.cluster.hierarchy as hcluster
from copy import deepcopy
import pycqed.analysis.tools.plot_interpolation as plt_interp
from pycqed.utilities import general as gen
from pycqed.instrument_drivers.meta_instrument.LutMans import flux_lutman as flm
from datetime import datetime
from pycqed.measurement.optimization import multi_targets_phase_offset
from pycqed.analysis_v2.tools.plotting import (
scatter_pnts_overlay,
contour_overlay,
annotate_pnts,
)
from pycqed.analysis_v2.tools import contours2d as c2d
import logging
log = logging.getLogger(__name__)
class Chevron_Analysis(ba.BaseDataAnalysis):
def __init__(
self,
ts: str = None,
label=None,
ch_idx=0,
coupling="g",
min_fit_amp=0,
auto=True,
):
"""
Analyzes a Chevron and fits the avoided crossing.
Parameters
----------
ts: str
timestamp of the datafile
label: str
label to find the datafile (optional)
ch_idx: int
channel to use when fitting the avoided crossing
coupling: Enum("g", "J1", "J2")
used to label the avoided crossing and calculate related quantities
min_fit_amp:
            minimal amplitude of the fitted cosine for each line cut.
Oscillations with a smaller amplitude will be ignored in the fit
of the avoided crossing.
auto: bool
if True run all parts of the analysis.
"""
super().__init__(do_fitting=True)
self.ts = ts
self.label = label
self.coupling = coupling
self.ch_idx = ch_idx
self.min_fit_amp = min_fit_amp
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
a = ma.MeasurementAnalysis(timestamp=self.ts, label=self.label, auto=False)
a.get_naming_and_values_2D()
a.finish()
self.timestamps = [a.timestamp_string]
self.raw_data_dict["timestamps"] = self.timestamps
self.raw_data_dict["timestamp_string"] = a.timestamp
for attr in [
"sweep_points",
"sweep_points_2D",
"measured_values",
"parameter_names",
"parameter_units",
"value_names",
"value_units",
]:
self.raw_data_dict[attr] = getattr(a, attr)
self.raw_data_dict["folder"] = a.folder
def process_data(self):
self.proc_data_dict = OrderedDict()
# select the relevant data
x = self.raw_data_dict["sweep_points"]
t = self.raw_data_dict["sweep_points_2D"]
Z = self.raw_data_dict["measured_values"][self.ch_idx].T
# fit frequencies to each individual cut (time trace)
freqs = []
freqs_std = []
fit_results = []
amps = []
for xi, z in zip(x, Z.T):
CosModel = lmfit.Model(CosFunc)
CosModel.guess = Cos_guess
pars = CosModel.guess(CosModel, z, t)
fr = CosModel.fit(data=z, t=t, params=pars)
amps.append(fr.params["amplitude"].value)
freqs.append(fr.params["frequency"].value)
freqs_std.append(fr.params["frequency"].stderr)
fit_results.append(fr)
# N.B. the fit results are not saved in self.fit_res as this would
# bloat the datafiles.
self.proc_data_dict["fit_results"] = np.array(fit_results)
self.proc_data_dict["amp_fits"] = np.array(amps)
self.proc_data_dict["freq_fits"] = np.array(freqs)
self.proc_data_dict["freq_fits_std"] = np.array(freqs_std)
# take a Fourier transform (nice for plotting)
fft_data = abs(np.fft.fft(Z.T).T)
fft_freqs = np.fft.fftfreq(len(t), d=t[1] - t[0])
sort_vec = np.argsort(fft_freqs)
fft_data_sorted = fft_data[sort_vec, :]
fft_freqs_sorted = fft_freqs[sort_vec]
self.proc_data_dict["fft_data_sorted"] = fft_data_sorted
self.proc_data_dict["fft_freqs_sorted"] = fft_freqs_sorted
def run_fitting(self):
super().run_fitting()
fit_mask = np.where(self.proc_data_dict["amp_fits"] > self.min_fit_amp)
avoided_crossing_mod = lmfit.Model(avoided_crossing_freq_shift)
# hardcoded guesses! Bad practice, needs a proper guess func
avoided_crossing_mod.set_param_hint("a", value=3e9)
avoided_crossing_mod.set_param_hint("b", value=-2e9)
avoided_crossing_mod.set_param_hint("g", value=20e6, min=0)
params = avoided_crossing_mod.make_params()
self.fit_res["avoided_crossing"] = avoided_crossing_mod.fit(
data=self.proc_data_dict["freq_fits"][fit_mask],
flux=self.raw_data_dict["sweep_points"][fit_mask],
params=params,
)
def analyze_fit_results(self):
self.proc_data_dict["quantities_of_interest"] = {}
# Extract quantities of interest from the fit
self.proc_data_dict["quantities_of_interest"] = {}
qoi = self.proc_data_dict["quantities_of_interest"]
g = self.fit_res["avoided_crossing"].params["g"]
qoi["g"] = ufloat(g.value, g.stderr)
self.coupling_msg = ""
if self.coupling == "J1":
qoi["J1"] = qoi["g"]
qoi["J2"] = qoi["g"] * np.sqrt(2)
self.coupling_msg += (
r"Measured $J_1$ = {} MHz".format(qoi["J1"] * 1e-6) + "\n"
)
self.coupling_msg += r"Expected $J_2$ = {} MHz".format(qoi["J2"] * 1e-6)
elif self.coupling == "J2":
qoi["J1"] = qoi["g"] / np.sqrt(2)
qoi["J2"] = qoi["g"]
self.coupling_msg += (
r"Expected $J_1$ = {} MHz".format(qoi["J1"] * 1e-6) + "\n"
)
self.coupling_msg += r"Measured $J_2$ = {} MHz".format(qoi["J2"] * 1e-6)
else:
self.coupling_msg += "g = {}".format(qoi["g"])
def prepare_plots(self):
for i, val_name in enumerate(self.raw_data_dict["value_names"]):
self.plot_dicts["chevron_{}".format(val_name)] = {
"plotfn": plot_chevron,
"x": self.raw_data_dict["sweep_points"],
"y": self.raw_data_dict["sweep_points_2D"],
"Z": self.raw_data_dict["measured_values"][i].T,
"xlabel": self.raw_data_dict["parameter_names"][0],
"ylabel": self.raw_data_dict["parameter_names"][1],
"zlabel": self.raw_data_dict["value_names"][i],
"xunit": self.raw_data_dict["parameter_units"][0],
"yunit": self.raw_data_dict["parameter_units"][1],
"zunit": self.raw_data_dict["value_units"][i],
"title": self.raw_data_dict["timestamp_string"]
+ "\n"
+ "Chevron {}".format(val_name),
}
self.plot_dicts["chevron_fft"] = {
"plotfn": plot_chevron_FFT,
"x": self.raw_data_dict["sweep_points"],
"xunit": self.raw_data_dict["parameter_units"][0],
"fft_freqs": self.proc_data_dict["fft_freqs_sorted"],
"fft_data": self.proc_data_dict["fft_data_sorted"],
"freq_fits": self.proc_data_dict["freq_fits"],
"freq_fits_std": self.proc_data_dict["freq_fits_std"],
"fit_res": self.fit_res["avoided_crossing"],
"coupling_msg": self.coupling_msg,
"title": self.raw_data_dict["timestamp_string"]
+ "\n"
+ "Fourier transform of Chevron",
}
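# A brief usage sketch for Chevron_Analysis; the timestamp below is a
# placeholder for a real measurement and coupling="J2" is just one of the
# documented options.
def _example_chevron_analysis_usage():
    a = Chevron_Analysis(ts="20190101_120000", coupling="J2",
                         ch_idx=0, min_fit_amp=0.05)
    # the fitted coupling strength is stored in the quantities of interest
    return a.proc_data_dict["quantities_of_interest"]["g"]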
def plot_chevron(x, y, Z, xlabel, xunit, ylabel, yunit, zlabel, zunit, title, ax, **kw):
colormap = ax.pcolormesh(
x,
y,
Z,
cmap="viridis", # norm=norm,
linewidth=0,
rasterized=True,
# assumes digitized readout
vmin=0,
vmax=1,
)
set_xlabel(ax, xlabel, xunit)
set_ylabel(ax, ylabel, yunit)
ax.set_title(title)
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("right", size="5%", pad="2%")
cbar = plt.colorbar(colormap, cax=cax, orientation="vertical")
cax.set_ylabel("L1 (%)")
set_ylabel(cax, zlabel, zunit)
def plot_chevron_FFT(
x,
xunit,
fft_freqs,
fft_data,
freq_fits,
freq_fits_std,
fit_res,
coupling_msg,
title,
ax,
**kw
):
colormap = ax.pcolormesh(
x,
fft_freqs,
fft_data,
cmap="viridis", # norm=norm,
linewidth=0,
rasterized=True,
vmin=0,
vmax=5,
)
ax.errorbar(
x=x,
y=freq_fits,
yerr=freq_fits_std,
ls="--",
c="r",
alpha=0.5,
label="Extracted freqs",
)
x_fine = np.linspace(x[0], x[-1], 200)
plot_fit(x, fit_res, ax=ax, c="C1", label="Avoided crossing fit", ls=":")
set_xlabel(ax, "Flux bias", xunit)
set_ylabel(ax, "Frequency", "Hz")
ax.legend(loc=(1.05, 0.7))
ax.text(1.05, 0.5, coupling_msg, transform=ax.transAxes)
class Chevron_Alignment_Analysis(sa.Basic2DInterpolatedAnalysis):
    """Analysis of a chevron-alignment measurement.
    Fits the chevron pattern for each flux-bias value of a 2D sweep
    (pulse amplitude vs. flux bias) and extracts the calibrated
    (sweet-spot) bias and the pulse amplitude of the interaction point.
    """
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label: str = "",
data_file_path: str = None,
close_figs: bool = True,
options_dict: dict = None,
extract_only: bool = False,
do_fitting: bool = True,
auto: bool = True,
save_qois: bool = True,
fit_from: str = "",
fit_threshold: float = None,
sq_pulse_duration: float = None,
peak_is_inverted: bool = True,
):
self.fit_from = fit_from
self.fit_threshold = fit_threshold
self.sq_pulse_duration = sq_pulse_duration
self.peak_is_inverted = peak_is_inverted
if do_fitting and sq_pulse_duration is None:
log.error(
"Pulse duration is required for fitting. Fitting will be skipped!"
)
do_fitting = do_fitting and sq_pulse_duration is not None
super().__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
data_file_path=data_file_path,
close_figs=close_figs,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
save_qois=save_qois,
auto=auto,
interp_method="linear",
)
def extract_data(self):
super().extract_data()
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
bias_axis = "x" if "FBL" in self.raw_data_dict["xlabel"].upper() else "y"
pdd["bias_axis"] = bias_axis
amps_axis = "y" if bias_axis == "x" else "x"
pdd["amps_axis"] = amps_axis
unique_bias_values = np.unique(self.raw_data_dict[bias_axis])
pdd["unique_bias_values"] = unique_bias_values
bias_1D_cuts = []
pdd["bias_1D_cuts"] = bias_1D_cuts
bias_strs = []
pdd["bias_strs"] = bias_strs
for unique_bias in unique_bias_values:
is_this_unique = self.raw_data_dict[bias_axis] == unique_bias
is_neg_amp = self.raw_data_dict[amps_axis] < 0
is_pos_amp = self.raw_data_dict[amps_axis] > 0
idxs_amps = np.where(is_this_unique)[0]
idxs_amps_neg = np.where(is_this_unique * is_neg_amp)[0]
idxs_amps_pos = np.where(is_this_unique * is_pos_amp)[0]
amps_neg = self.raw_data_dict[amps_axis][idxs_amps_neg]
amps_pos = self.raw_data_dict[amps_axis][idxs_amps_pos]
amps = self.raw_data_dict[amps_axis][idxs_amps]
mv = self.raw_data_dict["measured_values"][:, idxs_amps]
mv_neg = self.raw_data_dict["measured_values"][:, idxs_amps_neg]
mv_pos = self.raw_data_dict["measured_values"][:, idxs_amps_pos]
bias_1D_cuts.append(
{
"amps_neg": amps_neg,
"amps_pos": amps_pos,
"mv_neg": mv_neg,
"mv_pos": mv_pos,
"amps": amps,
"mv": mv,
}
)
scale_factor, unit = SI_prefix_and_scale_factor(
val=unique_bias, unit=self.proc_data_dict["yunit"]
)
bias_strs.append("{:4g} ({})".format(unique_bias * scale_factor, unit))
# values stored in quantities of interest will be saved in the data file
self.proc_data_dict["quantities_of_interest"] = {}
def prepare_fitting(self):
t = self.sq_pulse_duration
fit_d = self.fit_dicts
pdd = self.proc_data_dict
if self.fit_from != "":
fit_from_idx = self.raw_data_dict["value_names"].index(self.fit_from)
else:
fit_from_idx = 1
self.fit_from = self.raw_data_dict["value_names"][fit_from_idx]
for i, bdict in enumerate(pdd["bias_1D_cuts"]):
# Allow fitting the populations of both qubits
fit_func = ChevronInvertedFunc if self.peak_is_inverted else ChevronFunc
chevron_model = lmfit.Model(fit_func)
chevron_model.guess = ChevronGuess
fit_key = "chevron_fit_{}".format(i)
fit_xvals = bdict["amps"]
fit_yvals = bdict["mv"][fit_from_idx]
if self.fit_threshold is not None:
# For some cases the fit might not work well due to noise
# This is to fit above a threshold only
selection = (
(fit_yvals < self.fit_threshold)
if self.peak_is_inverted
else (fit_yvals > self.fit_threshold)
)
sel_idx = np.where(selection)[0]
fit_yvals = fit_yvals[sel_idx]
fit_xvals = fit_xvals[sel_idx]
fit_d[fit_key] = {
"model": chevron_model,
"guessfn_pars": {"model": chevron_model, "t": t},
"fit_xvals": {"amp": fit_xvals},
"fit_yvals": {"data": fit_yvals},
}
def analyze_fit_results(self):
pdd = self.proc_data_dict
ubv = pdd["unique_bias_values"]
fit_res = self.fit_res
qoi = pdd["quantities_of_interest"]
centers_diffs = []
chevron_centers_L = []
chevron_centers_R = []
chevron_centers_L_vals = []
chevron_centers_R_vals = []
for bias, fit_key in zip(ubv, fit_res.keys()):
amp_center_1 = fit_res[fit_key].params["amp_center_1"]
amp_center_2 = fit_res[fit_key].params["amp_center_2"]
centers = [amp_center_1, amp_center_2]
arg_amp_L = np.argmin([amp_center_1.value, amp_center_2.value])
arg_amp_R = np.argmax([amp_center_1.value, amp_center_2.value])
stderr_L = (
centers[arg_amp_L].stderr
if centers[arg_amp_L].stderr is not None
else np.nan
)
stderr_R = (
centers[arg_amp_R].stderr
if centers[arg_amp_R].stderr is not None
else np.nan
)
chevron_centers_L.append(ufloat(centers[arg_amp_L].value, stderr_L))
chevron_centers_R.append(ufloat(centers[arg_amp_R].value, stderr_R))
chevron_centers_L_vals.append(centers[arg_amp_L].value)
chevron_centers_R_vals.append(centers[arg_amp_R].value)
centers_diffs.append(centers[arg_amp_L].value + centers[arg_amp_R].value)
pdd["chevron_centers_L"] = chevron_centers_L
pdd["chevron_centers_R"] = chevron_centers_R
pdd["centers_diffs"] = centers_diffs
bias_calibration_coeffs = np.polyfit(centers_diffs, ubv, 1)
pdd["bias_calibration_coeffs"] = bias_calibration_coeffs
calib_bias = bias_calibration_coeffs[1]
pdd["calibration_bias"] = calib_bias
bias_calibration_coeffs_L = np.polyfit(chevron_centers_L_vals, ubv, 1)
bias_calibration_coeffs_R = np.polyfit(chevron_centers_R_vals, ubv, 1)
p = bias_calibration_coeffs_L
int_pnt_L = (calib_bias - p[1]) / p[0]
p = bias_calibration_coeffs_R
int_pnt_R = (calib_bias - p[1]) / p[0]
pdd["interaction_pnts"] = (int_pnt_L, int_pnt_R)
amp_interaction_pnt = (np.abs(int_pnt_L) + np.abs(int_pnt_R)) / 2
pdd["amp_interaction_pnt"] = amp_interaction_pnt
qoi["calibration_bias"] = calib_bias
qoi["amp_interaction_pnt"] = amp_interaction_pnt
def prepare_plots(self):
# assumes that value names are unique in an experiment
super().prepare_plots()
bias_1D_cuts = self.proc_data_dict["bias_1D_cuts"]
num_cuts = len(bias_1D_cuts)
for i, val_name in enumerate(self.proc_data_dict["value_names"]):
ax_id = "all_bias_1D_cuts_" + val_name
self.plot_dicts[ax_id] = {
"ax_id": ax_id,
"plotfn": plot_chevron_bias_1D_cuts,
"bias_1D_cuts_dicts": bias_1D_cuts,
"xlabel": self.proc_data_dict["xlabel"],
"xunit": self.proc_data_dict["xunit"],
"ylabel": val_name,
"yunit": self.proc_data_dict["value_units"][i],
"title": "{}\n{}".format(
self.timestamp, self.proc_data_dict["measurementstring"]
),
"title_neg": val_name + " (amp < 0)",
"title_pos": val_name + " (amp > 0)",
"sharex": False,
"sharey": True,
"plotsize": (13, 5 * num_cuts),
"numplotsy": num_cuts,
"numplotsx": 2,
"mv_indx": i,
}
if self.do_fitting:
self._prepare_fit_plots()
def _prepare_fit_plots(self):
pdd = self.proc_data_dict
pd = self.plot_dicts
for i, fit_key in enumerate(self.fit_res.keys()):
bias_str = pdd["bias_strs"][i]
pd[fit_key + "_L"] = {
"ax_id": "all_bias_1D_cuts_" + self.fit_from,
"plotfn": self.plot_fit,
"fit_res": self.fit_dicts[fit_key]["fit_res"],
"plot_init": self.options_dict["plot_init"],
"setlabel": "Fit [flux bias = " + bias_str + "]",
"do_legend": True,
"ax_row": i,
"ax_col": 0,
}
pd[fit_key + "_R"] = {
"ax_id": "all_bias_1D_cuts_" + self.fit_from,
"plotfn": self.plot_fit,
"fit_res": self.fit_dicts[fit_key]["fit_res"],
"plot_init": self.options_dict["plot_init"],
"setlabel": "Fit [flux bias = " + bias_str + "]",
"do_legend": True,
"ax_row": i,
"ax_col": 1,
}
pd["all_bias_1D_cuts_" + self.fit_from][
"fit_threshold"
] = self.fit_threshold
pd["all_bias_1D_cuts_" + self.fit_from][
"fit_threshold"
] = self.fit_threshold
center_L = pdd["chevron_centers_L"][i]
center_R = pdd["chevron_centers_R"][i]
pd[fit_key + "_L_center"] = {
"ax_id": "all_bias_1D_cuts_" + self.fit_from,
"plotfn": plot_chevron_center_on_1D_cut,
"center_amp_ufloat": center_L,
"label": center_L,
"ax_row": i,
"ax_col": 0,
}
pd[fit_key + "_R_center"] = {
"ax_id": "all_bias_1D_cuts_" + self.fit_from,
"plotfn": plot_chevron_center_on_1D_cut,
"center_amp_ufloat": center_R,
"label": center_R,
"ax_row": i,
"ax_col": 1,
}
calib_bias = pdd["calibration_bias"]
scale_factor, unit = SI_prefix_and_scale_factor(
val=calib_bias, unit=pdd["yunit"]
)
calib_bias_str = "{:.4g} ({})".format(calib_bias * scale_factor, unit)
poly_calib = np.poly1d(pdd["bias_calibration_coeffs"])
xs = np.array(pdd["centers_diffs"])[[0, -1]]
amp_interaction_pnt = pdd["amp_interaction_pnt"]
for i, val_name in enumerate(pdd["value_names"]):
# Order here matters due to the legend
self.plot_dicts["int_pnts_" + val_name] = {
"ax_id": val_name,
"plotfn": self.plot_line,
"func": "scatter",
"xvals": [pdd["interaction_pnts"][0], pdd["interaction_pnts"][1]],
"yvals": [calib_bias, calib_bias],
"marker": "o",
"color": "gold",
"line_kws": {"edgecolors": "gray", "linewidth": 0.7, "s": 100},
"setlabel": "Amp at interaction: {:3g}".format(amp_interaction_pnt),
}
self.plot_dicts["bias_fit_calib_" + val_name] = {
"ax_id": val_name,
"plotfn": self.plot_matplot_ax_method,
"func": "axhline",
"plot_kws": {
"y": calib_bias,
"ls": "--",
"color": "red",
"label": "Sweet spot bias: " + calib_bias_str,
},
}
self.plot_dicts["bias_fit_" + val_name] = {
"ax_id": val_name,
"plotfn": self.plot_line,
"xvals": xs,
"yvals": poly_calib(xs),
"setlabel": "Flux bias fit",
"do_legend": True,
"marker": "",
"linestyles": "r--",
"color": "red",
}
self.plot_dicts["bias_fit_data_" + val_name] = {
"ax_id": val_name,
"plotfn": self.plot_line,
"func": "scatter",
"xvals": pdd["centers_diffs"],
"yvals": pdd["unique_bias_values"],
"marker": "o",
"color": "orange",
"line_kws": {"edgecolors": "gray", "linewidth": 0.5},
}
def plot_chevron_bias_1D_cuts(bias_1D_cuts_dicts, mv_indx, fig=None, ax=None, **kw):
if ax is None:
num_cuts = len(bias_1D_cuts_dicts)
fig, ax = plt.subplots(
num_cuts, 2, sharex=False, sharey=True, figsize=(13, 5 * num_cuts)
)
fig.tight_layout()
xlabel = kw.get("xlabel", "")
ylabel = kw.get("ylabel", "")
x_unit = kw.get("xunit", "")
y_unit = kw.get("yunit", "")
fit_threshold = kw.get("fit_threshold", None)
title_neg = kw.pop("title_neg", None)
title_pos = kw.pop("title_pos", None)
if title_neg is not None:
ax[0][0].set_title(title_neg)
if title_pos is not None:
ax[0][1].set_title(title_pos)
edgecolors = "grey"
linewidth = 0.2
cmap = "plasma"
for i, d in enumerate(bias_1D_cuts_dicts):
ax[i][0].scatter(
d["amps_neg"],
d["mv_neg"][mv_indx],
edgecolors=edgecolors,
linewidth=linewidth,
c=range(len(d["amps_neg"])),
cmap=cmap,
)
ax[i][0].set_xlim(np.min(d["amps_neg"]), np.max(d["amps_neg"]))
ax[i][1].scatter(
d["amps_pos"],
d["mv_pos"][mv_indx],
edgecolors=edgecolors,
linewidth=linewidth,
c=range(len(d["amps_pos"])),
cmap=cmap,
)
ax[i][1].set_xlim(np.min(d["amps_pos"]), np.max(d["amps_pos"]))
# hide the spines between the two half-panels
ax[i][0].spines["right"].set_visible(False)
ax[i][1].spines["left"].set_visible(False)
ax[i][0].yaxis.tick_left()
ax[i][1].tick_params(labelleft=False)
ax[i][1].yaxis.tick_right()
set_ylabel(ax[i][0], ylabel, unit=y_unit)
if fit_threshold is not None:
label = "Fit threshold"
ax[i][0].axhline(fit_threshold, ls="--", color="green", label=label)
ax[i][1].axhline(fit_threshold, ls="--", color="green", label=label)
set_xlabel(ax[-1][0], xlabel, unit=x_unit)
set_xlabel(ax[-1][1], xlabel, unit=x_unit)
return fig, ax
def plot_chevron_center_on_1D_cut(
center_amp_ufloat, ax_row, ax_col, label, ax, fig=None, **kw
):
ax[ax_row][ax_col].axvline(
center_amp_ufloat.n, ls="--", label="Center: " + str(label)
)
ax[ax_row][ax_col].legend()
ax[ax_row][ax_col].axvline(
center_amp_ufloat.n - center_amp_ufloat.s, ls=":", color="grey"
)
ax[ax_row][ax_col].axvline(
center_amp_ufloat.n + center_amp_ufloat.s, ls=":", color="grey"
)
return fig, ax
class Conditional_Oscillation_Heatmap_Analysis(ba.BaseDataAnalysis):
"""
Intended for the analysis of CZ tuneup heatmaps
The data can be from an experiment or simulation
"""
def __init__(
self,
t_start: str = None,
t_stop: str = None,
label: str = "",
data_file_path: str = None,
close_figs: bool = True,
options_dict: dict = None,
extract_only: bool = False,
do_fitting: bool = False,
save_qois: bool = True,
auto: bool = True,
interp_method: str = "linear",
plt_orig_pnts: bool = True,
plt_contour_phase: bool = True,
plt_contour_L1: bool = False,
plt_optimal_values: bool = True,
plt_optimal_values_max: int = 1,
plt_clusters: bool = True,
clims: dict = None,
# e.g. clims={'L1': [0, 0.3], "Cost func": [0., 100]},
L1_contour_levels: list = [1, 5, 10],
phase_contour_levels: list = [90, 180, 270],
find_local_optimals: bool = True,
phase_thr=5,
L1_thr=0.5,
clustering_thr=10 / 360,
cluster_from_interp: bool = True,
_opt_are_interp: bool = True,
sort_clusters_by: str = "cost",
target_cond_phase: float = 180.0,
single_q_phase_offset: bool = False,
calc_L1_from_missing_frac: bool = True,
calc_L1_from_offset_diff: bool = False,
hull_clustering_thr=0.1,
hull_phase_thr=5,
hull_L1_thr=5,
gen_optima_hulls=True,
plt_optimal_hulls=True,
comparison_timestamp: str = None,
interp_grid_data: bool = False,
save_cond_phase_contours: list = [180],
):
self.plt_orig_pnts = plt_orig_pnts
self.plt_contour_phase = plt_contour_phase
self.plt_contour_L1 = plt_contour_L1
self.plt_optimal_values = plt_optimal_values
self.plt_optimal_values_max = plt_optimal_values_max
self.plt_clusters = plt_clusters
# Optimals are interpolated by default.
# Set this to False manually if the default analysis flow is changed
# (e.g. in get_guesses_from_cz_sim in flux_lutman). In that case the
# optimals are re-evaluated so that true (non-interpolated) values can be
# returned, even though the optimum itself is found via interpolation.
self._opt_are_interp = _opt_are_interp
self.clims = clims
self.L1_contour_levels = L1_contour_levels
self.phase_contour_levels = phase_contour_levels
self.find_local_optimals = find_local_optimals
self.phase_thr = phase_thr
self.L1_thr = L1_thr
self.clustering_thr = clustering_thr
self.cluster_from_interp = cluster_from_interp
# This allows for different strategies of scoring several optima
# NB: When interpolating we will not get any lower value than what
# already exists on the landscape
self.sort_clusters_by = sort_clusters_by
assert sort_clusters_by in {"cost", "L1_av_around"}
self.target_cond_phase = target_cond_phase
# Used when applying Pi pulses to check if both single qubits
# have the same phase as in the ideal case
self.single_q_phase_offset = single_q_phase_offset
# Handy calculation for comparing experiment and simulations
# but using the same analysis code
self.calc_L1_from_missing_frac = calc_L1_from_missing_frac
self.calc_L1_from_offset_diff = calc_L1_from_offset_diff
# Compare to any other dataset that has the same shape for
# 'measured_values'
self.comparison_timestamp = comparison_timestamp
# Used to generate the vertices of hulls that can be used later
# reoptimize only in the regions of interest
self.hull_clustering_thr = hull_clustering_thr
self.hull_phase_thr = hull_phase_thr
self.hull_L1_thr = hull_L1_thr
self.gen_optima_hulls = gen_optima_hulls
self.plt_optimal_hulls = plt_optimal_hulls
self.interp_method = interp_method
# Be able to also analyze linear 2D sweeps without interpolating
self.interp_grid_data = interp_grid_data
self.save_cond_phase_contours = save_cond_phase_contours
# FIXME this is overkill, using .upper() and .lower() would simplify
cost_func_Names = {
"Cost func",
"Cost func.",
"cost func",
"cost func.",
"cost function",
"Cost function",
"Cost function value",
}
L1_names = {"L1", "Leakage", "half missing fraction"}
ms_names = {
"missing fraction",
"Missing fraction",
"missing frac",
"missing frac.",
"Missing frac",
"Missing frac.",
}
cond_phase_names = {
"Cond phase",
"Cond. phase",
"Conditional phase",
"cond phase",
"cond. phase",
"conditional phase",
}
offset_diff_names = {
"offset difference",
"offset diff",
"offset diff.",
"Offset difference",
"Offset diff",
"Offset diff.",
}
phase_q0_names = {"Q0 phase", "phase q0"}
# also account for possible underscores instead of spaces between words
allNames = [
cost_func_Names,
L1_names,
ms_names,
cond_phase_names,
offset_diff_names,
phase_q0_names,
]
allNames = [
names.union({name.replace(" ", "_") for name in names})
for names in allNames
]
allNames = [
names.union(
{name + " 1" for name in names}.union({name + " 2" for name in names})
)
for names in allNames
]
[
self.cost_func_Names,
self.L1_names,
self.ms_names,
self.cond_phase_names,
self.offset_diff_names,
self.phase_q0_names,
] = allNames
super().__init__(
t_start=t_start,
t_stop=t_stop,
label=label,
data_file_path=data_file_path,
close_figs=close_figs,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
save_qois=save_qois,
)
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop, label=self.labels
)
self.raw_data_dict["timestamps"] = self.timestamps
self.timestamp = self.timestamps[0]
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamp, auto=False, close_file=False
)
a.get_naming_and_values()
for idx, lab in enumerate(["x", "y"]):
self.raw_data_dict[lab] = a.sweep_points[idx]
self.raw_data_dict["{}label".format(lab)] = a.parameter_names[idx]
self.raw_data_dict["{}unit".format(lab)] = a.parameter_units[idx]
self.raw_data_dict["measured_values"] = a.measured_values
self.raw_data_dict["value_names"] = a.value_names
self.raw_data_dict["value_units"] = a.value_units
self.raw_data_dict["measurementstring"] = a.measurementstring
self.raw_data_dict["folder"] = a.folder
a.finish()
def prepare_plots(self):
# assumes that value names are unique in an experiment
super().prepare_plots()
anglemap = hsluv_anglemap45
found_optimals = np.size(self.proc_data_dict["x_optimal"]) > 0
for i, val_name in enumerate(self.proc_data_dict["value_names"]):
zlabel = "{} ({})".format(val_name, self.proc_data_dict["value_units"][i])
self.plot_dicts[val_name] = {
"ax_id": val_name,
"plotfn": color_plot,
"x": self.proc_data_dict["x_int"],
"y": self.proc_data_dict["y_int"],
"z": self.proc_data_dict["interpolated_values"][i],
"xlabel": self.proc_data_dict["xlabel"],
"x_unit": self.proc_data_dict["xunit"],
"ylabel": self.proc_data_dict["ylabel"],
"y_unit": self.proc_data_dict["yunit"],
"zlabel": zlabel,
"title": "{}\n{}".format(
self.timestamp, self.proc_data_dict["measurementstring"]
),
}
if self.plt_orig_pnts:
self.plot_dicts[val_name + "_non_interpolated"] = {
"ax_id": val_name,
"plotfn": scatter_pnts_overlay,
"x": self.proc_data_dict["x"],
"y": self.proc_data_dict["y"],
}
unit = self.proc_data_dict["value_units"][i]
vmin = np.min(self.proc_data_dict["interpolated_values"][i])
vmax = np.max(self.proc_data_dict["interpolated_values"][i])
if unit == "deg":
self.plot_dicts[val_name]["cbarticks"] = np.arange(0.0, 360.1, 45)
self.plot_dicts[val_name]["cmap_chosen"] = anglemap
self.plot_dicts[val_name]["clim"] = [0.0, 360.0]
elif unit == "%":
self.plot_dicts[val_name]["cmap_chosen"] = "hot"
elif unit.startswith("∆ "):
self.plot_dicts[val_name]["cmap_chosen"] = "terrain"
# self.plot_dicts[val_name]['cmap_chosen'] = 'RdBu'
vcenter = 0
if vmin * vmax < 0:
divnorm = col.DivergingNorm(vmin=vmin, vcenter=vcenter, vmax=vmax)
self.plot_dicts[val_name]["norm"] = divnorm
else:
self.plot_dicts[val_name]["clim"] = [
np.max((vcenter, vmin)),
np.min((vcenter, vmax)),
]
if self.clims is not None and val_name in self.clims.keys():
self.plot_dicts[val_name]["clim"] = self.clims[val_name]
# Visual indicator when saturating the color range
clims = self.clims[val_name]
cbarextend = "min" if min(clims) > vmin else "neither"
cbarextend = "max" if max(clims) < vmax else cbarextend
cbarextend = (
"both" if min(clims) > vmin and max(clims) < vmax else cbarextend
)
self.plot_dicts[val_name]["cbarextend"] = cbarextend
if self.plt_contour_phase:
# Find index of Conditional Phase
z_cond_phase = None
for j, val_name_j in enumerate(self.proc_data_dict["value_names"]):
if val_name_j in self.cond_phase_names:
z_cond_phase = self.proc_data_dict["interpolated_values"][j]
break
if z_cond_phase is not None:
self.plot_dicts[val_name + "_cond_phase_contour"] = {
"ax_id": val_name,
"plotfn": contour_overlay,
"x": self.proc_data_dict["x_int"],
"y": self.proc_data_dict["y_int"],
"z": z_cond_phase,
"colormap": anglemap,
"cyclic_data": True,
"contour_levels": self.phase_contour_levels,
"vlim": (0, 360),
# "linestyles": "-",
}
else:
log.warning("No data found named {}".format(self.cond_phase_names))
if self.plt_contour_L1:
# Find index of Leakage or Missing Fraction
z_L1 = None
for j, val_name_j in enumerate(self.proc_data_dict["value_names"]):
if val_name_j in self.L1_names or val_name_j in self.ms_names:
z_L1 = self.proc_data_dict["interpolated_values"][j]
break
if z_L1 is not None:
vlim = (
self.proc_data_dict["interpolated_values"][j].min(),
self.proc_data_dict["interpolated_values"][j].max(),
)
contour_levels = np.array(self.L1_contour_levels)
# Leakage is estimated as (Missing fraction/2)
contour_levels = (
contour_levels
if self.proc_data_dict["value_names"][j] in self.L1_names
else 2 * contour_levels
)
self.plot_dicts[val_name + "_L1_contour"] = {
"ax_id": val_name,
"plotfn": contour_overlay,
"x": self.proc_data_dict["x_int"],
"y": self.proc_data_dict["y_int"],
"z": z_L1,
# 'unit': self.proc_data_dict['value_units'][j],
"contour_levels": contour_levels,
"vlim": vlim,
"colormap": "hot",
"linestyles": "-",
# "linestyles": "dashdot",
}
else:
log.warning("No data found named {}".format(self.L1_names))
if self.plt_optimal_hulls and self.gen_optima_hulls:
sorted_hull_vertices = self.proc_data_dict["hull_vertices"]
for hull_i, hull_vertices in sorted_hull_vertices.items():
vertices_x, vertices_y = np.transpose(hull_vertices)
# Close the start and end of the line
x_vals = np.concatenate((vertices_x, vertices_x[:1]))
y_vals = np.concatenate((vertices_y, vertices_y[:1]))
self.plot_dicts[val_name + "_hull_{}".format(hull_i)] = {
"ax_id": val_name,
"plotfn": self.plot_line,
"xvals": x_vals,
"yvals": y_vals,
"marker": "",
"linestyles": "-",
"color": "blue",
}
if (
self.plt_optimal_values
and found_optimals
and val_name in self.cost_func_Names
):
self.plot_dicts[val_name + "_optimal_pars"] = {
"ax_id": val_name,
"ypos": -0.25,
"xpos": 0,
"plotfn": self.plot_text,
"box_props": "fancy",
"line_kws": {"alpha": 0},
"text_string": self.get_readable_optimals(
optimal_end=self.plt_optimal_values_max
),
"horizontalalignment": "left",
"verticalaligment": "top",
"fontsize": 14,
}
if self.plt_clusters and found_optimals:
self.plot_dicts[val_name + "_clusters"] = {
"ax_id": val_name,
"plotfn": scatter_pnts_overlay,
"x": self.proc_data_dict["clusters_pnts_x"],
"y": self.proc_data_dict["clusters_pnts_y"],
"color": None,
"edgecolors": None if self.cluster_from_interp else "black",
"marker": "o",
# 'linewidth': 1,
"c": self.proc_data_dict["clusters_pnts_colors"],
}
if self.plt_optimal_values and found_optimals:
self.plot_dicts[val_name + "_optimal_pnts_annotate"] = {
"ax_id": val_name,
"plotfn": annotate_pnts,
"txt": np.arange(np.size(self.proc_data_dict["x_optimal"])),
"x": self.proc_data_dict["x_optimal"],
"y": self.proc_data_dict["y_optimal"],
}
# Extra plot to easily identify the location of the optimal hulls
# and cond. phase contours
sorted_hull_vertices = self.proc_data_dict.get("hull_vertices", [])
if self.gen_optima_hulls and len(sorted_hull_vertices):
for hull_id, hull_vertices in sorted_hull_vertices.items():
vertices_x, vertices_y = np.transpose(hull_vertices)
# Close the start and end of the line
x_vals = np.concatenate((vertices_x, vertices_x[:1]))
y_vals = np.concatenate((vertices_y, vertices_y[:1]))
self.plot_dicts["hull_" + hull_id] = {
"ax_id": "hull_and_contours",
"plotfn": self.plot_line,
"xvals": x_vals,
"xlabel": self.raw_data_dict["xlabel"],
"xunit": self.raw_data_dict["xunit"],
"yvals": y_vals,
"ylabel": self.raw_data_dict["ylabel"],
"yunit": self.raw_data_dict["yunit"],
"yrange": self.options_dict.get("yrange", None),
"xrange": self.options_dict.get("xrange", None),
"setlabel": "hull #" + hull_id,
"title": "{}\n{}".format(
self.timestamp, self.proc_data_dict["measurementstring"]
),
"do_legend": True,
"legend_pos": "best",
"marker": "", # don't use markers
"linestyle": "-",
# Fixing the assigned color so that it can be matched on
# other plots
"color": "C" + str(int(hull_id) % 10),
}
if len(self.save_cond_phase_contours):
c_dict = self.proc_data_dict["cond_phase_contours"]
i = 0
for level, contours in c_dict.items():
for contour_id, contour in contours.items():
x_vals, y_vals = np.transpose(contour)
self.plot_dicts["contour_" + level + "_" + contour_id] = {
"ax_id": "hull_and_contours",
"plotfn": self.plot_line,
"xvals": x_vals,
"xlabel": self.raw_data_dict["xlabel"],
"xunit": self.raw_data_dict["xunit"],
"yvals": y_vals,
"ylabel": self.raw_data_dict["ylabel"],
"yunit": self.raw_data_dict["yunit"],
"yrange": self.options_dict.get("yrange", None),
"xrange": self.options_dict.get("xrange", None),
"setlabel": level + " #" + contour_id,
"title": "{}\n{}".format(
self.timestamp, self.proc_data_dict["measurementstring"]
),
"do_legend": True,
"legend_pos": "best",
"legend_ncol": 2,
"marker": "", # don't use markers
"linestyle": "--",
# Continuing the color cycle
"color": "C" + str(len(sorted_hull_vertices) % 10 + i),
}
i += 1
# Plotting all quantities along the raw contours of conditional phase
mvac = self.proc_data_dict.get("measured_values_along_contours", [])
for i, mv_levels_dict in enumerate(mvac):
# We iterate over all measured quantities and for each create a
# plot that has the measured quantity along all contours
j = 0
for level, cntrs_dict in mv_levels_dict.items():
for cntr_id, mvs in cntrs_dict.items():
c_pnts = self.proc_data_dict["cond_phase_contours"][level][cntr_id]
x_vals = c2d.distance_along_2D_contour(c_pnts, True, True)
vln = self.proc_data_dict["value_names"][i]
vlu = self.proc_data_dict["value_units"][i]
plt_dict_label = "contour_" + vln + "_" + level + "_#" + cntr_id
self.plot_dicts[plt_dict_label] = {
"ax_id": "contours_" + vln,
"plotfn": self.plot_line,
"xvals": x_vals,
"xlabel": "Normalized distance along contour",
"xunit": "a.u.",
"yvals": mvs,
"ylabel": vln,
"yunit": vlu,
"setlabel": level + " #" + cntr_id,
"title": "{}\n{}".format(
self.timestamp, self.proc_data_dict["measurementstring"]
),
"do_legend": True,
"legend_pos": "best",
"legend_ncol": 2,
"marker": "", # don't use markers
"linestyle": "-",
"color": "C" + str(len(sorted_hull_vertices) % 10 + j),
}
j += 1
# Plotting all quantities along the raw contours of conditional phase
# only inside hulls
mvac = self.proc_data_dict.get("measured_values_along_contours_in_hulls", [])
for i, hulls_dict in enumerate(mvac):
# We iterate over all measured quantities and for each create a
# plot that has the measured quantity along all contours
for hull_id, mv_levels_dict in hulls_dict.items():
j = 0
for level, cntrs_dict in mv_levels_dict.items():
for cntr_id, c_dict in cntrs_dict.items():
c_pnts = c_dict["pnts"]
mvs = c_dict["vals"]
if len(c_pnts):
# Only proceed if there are any points in the hull
x_vals = c2d.distance_along_2D_contour(c_pnts, True, True)
vln = self.proc_data_dict["value_names"][i]
vlu = self.proc_data_dict["value_units"][i]
plt_dict_label = (
"contour_"
+ vln
+ "_hull_#"
+ hull_id
+ level
+ "_#"
+ cntr_id
)
self.plot_dicts[plt_dict_label] = {
"ax_id": "contours_" + vln + "_in_hull",
"plotfn": self.plot_line,
"xvals": x_vals,
"xlabel": "Normalized distance along contour",
"xunit": "a.u.",
"yvals": mvs,
"ylabel": vln,
"yunit": vlu,
"setlabel": level + " #" + cntr_id,
"title": "{}\n{}".format(
self.timestamp,
self.proc_data_dict["measurementstring"],
),
"do_legend": True,
"legend_pos": "best",
"legend_ncol": 2,
"marker": "", # don't use markers
"linestyle": "-",
"color": "C" + str(len(sorted_hull_vertices) % 10 + j),
}
plt_dict_label = (
"contour_"
+ vln
+ "_hull_#"
+ hull_id
+ level
+ "_#"
+ cntr_id
+ "_hull_color"
)
# We plot these marker points in the hull color so that they
# can be matched with the hulls on the 2D plot
extra_pnts_idx = len(x_vals) // 3
self.plot_dicts[plt_dict_label] = {
"ax_id": "contours_" + vln + "_in_hull",
"plotfn": self.plot_line,
"xvals": x_vals[[0, extra_pnts_idx, -extra_pnts_idx, -1]],
"xlabel": "Normalized distance along contour",
"xunit": "a.u.",
"yvals": mvs[[0, extra_pnts_idx, -extra_pnts_idx, -1]],
"ylabel": vln,
"yunit": vlu,
"setlabel": "hull #" + hull_id,
"title": "{}\n{}".format(
self.timestamp,
self.proc_data_dict["measurementstring"],
),
"do_legend": True,
"legend_pos": "best",
"legend_ncol": 2,
"marker": "o", # don't use markers
"linestyle": "",
"color": "C" + str(int(hull_id) % 10),
}
j += 1
def process_data(self):
self.proc_data_dict = deepcopy(self.raw_data_dict)
phase_q0_name = "phase_q0"
phase_q1_name = "phase_q1"
if self.single_q_phase_offset and {phase_q0_name, phase_q1_name} <= set(
self.proc_data_dict["value_names"]
):
# This was used for some debugging
self.proc_data_dict["value_names"].append("phase_q1 - phase_q0")
self.proc_data_dict["value_units"].append("deg")
phase_q0 = self.proc_data_dict["measured_values"][
self.proc_data_dict["value_names"].index(phase_q0_name)
]
phase_q1 = self.proc_data_dict["measured_values"][
self.proc_data_dict["value_names"].index(phase_q1_name)
]
self.proc_data_dict["measured_values"] = np.vstack(
(self.proc_data_dict["measured_values"], (phase_q1 - phase_q0) % 360)
)
# Calculate L1 from missing fraction and/or offset difference if available
vln_set = set(self.proc_data_dict["value_names"])
for names, do_calc in [
(self.ms_names, self.calc_L1_from_missing_frac),
(self.offset_diff_names, self.calc_L1_from_offset_diff),
]:
found_name = len(vln_set.intersection(names)) > 0
if do_calc and found_name:
name = vln_set.intersection(names).pop()
self.proc_data_dict["value_names"].append("half " + name)
self.proc_data_dict["value_units"].append("%")
L1_equiv = (
self.proc_data_dict["measured_values"][
self.proc_data_dict["value_names"].index(name)
]
/ 2
)
self.proc_data_dict["measured_values"] = np.vstack(
(self.proc_data_dict["measured_values"], L1_equiv)
)
vln = self.proc_data_dict["value_names"]
measured_vals = self.proc_data_dict["measured_values"]
vlu = self.proc_data_dict["value_units"]
# Calculate comparison heatmaps
if self.comparison_timestamp is not None:
coha_comp = Conditional_Oscillation_Heatmap_Analysis(
t_start=self.comparison_timestamp, extract_only=True
)
# Because there is no standard for how measured quantities are named,
# we have to do some name matching here
for names in [
self.cost_func_Names,
self.L1_names,
self.ms_names,
self.cond_phase_names,
self.offset_diff_names,
self.phase_q0_names,
]:
inters_this = names.intersection(self.proc_data_dict["value_names"])
inters_comp = names.intersection(
coha_comp.proc_data_dict["value_names"]
)
if len(inters_this) > 0 and len(inters_comp) > 0:
this_name = inters_this.pop()
comp_name = inters_comp.pop()
indx_this_name = self.proc_data_dict["value_names"].index(this_name)
self.proc_data_dict["value_names"].append(
"[{}]\n{} - {}".format(
self.comparison_timestamp, comp_name, this_name
)
)
self.proc_data_dict["value_units"].append(
"∆ " + self.proc_data_dict["value_units"][indx_this_name]
)
this_mv = self.proc_data_dict["measured_values"][indx_this_name]
ref_mv = coha_comp.proc_data_dict["measured_values"][
coha_comp.proc_data_dict["value_names"].index(comp_name)
]
delta_mv = ref_mv - this_mv
self.proc_data_dict["measured_values"] = np.vstack(
(self.proc_data_dict["measured_values"], delta_mv)
)
self.proc_data_dict["interpolated_values"] = []
self.proc_data_dict["interpolators"] = []
interps = self.proc_data_dict["interpolators"]
for i in range(len(self.proc_data_dict["value_names"])):
if self.proc_data_dict["value_units"][i] == "deg":
interp_method = "deg"
else:
interp_method = self.interp_method
ip = plt_interp.HeatmapInterpolator(
self.proc_data_dict["x"],
self.proc_data_dict["y"],
self.proc_data_dict["measured_values"][i],
interp_method=interp_method,
rescale=True,
)
interps.append(ip)
x_int, y_int, z_int = plt_interp.interpolate_heatmap(
x=self.proc_data_dict["x"],
y=self.proc_data_dict["y"],
ip=ip,
n=300, # avoid calculation of areas
interp_grid_data=self.interp_grid_data,
)
self.proc_data_dict["interpolated_values"].append(z_int)
interp_vals = self.proc_data_dict["interpolated_values"]
self.proc_data_dict["x_int"] = x_int
self.proc_data_dict["y_int"] = y_int
# Processing for optimal points
if not self.cluster_from_interp:
where = [(name in self.cost_func_Names) for name in vln]
cost_func_indxs = np.where(where)[0][0]
cost_func = measured_vals[cost_func_indxs]
try:
where = [(name in self.cond_phase_names) for name in vln]
cond_phase_indx = np.where(where)[0][0]
cond_phase_arr = measured_vals[cond_phase_indx]
except Exception:
# Ignore if was not measured
log.error("\n" + gen.get_formatted_exception())
try:
where = [(name in self.L1_names) for name in vln]
L1_indx = np.where(where)[0][0]
L1_arr = measured_vals[L1_indx]
except Exception:
# Ignore if was not measured
log.error("\n" + gen.get_formatted_exception())
theta_f_arr = self.proc_data_dict["x"]
lambda_2_arr = self.proc_data_dict["y"]
extract_optimals_from = "measured_values"
else:
where = [(name in self.cost_func_Names) for name in vln]
cost_func_indxs = np.where(where)[0][0]
cost_func = interp_vals[cost_func_indxs]
cost_func = interp_to_1D_arr(z_int=cost_func)
where = [(name in self.cond_phase_names) for name in vln]
cond_phase_indx = np.where(where)[0][0]
cond_phase_arr = interp_vals[cond_phase_indx]
cond_phase_arr = interp_to_1D_arr(z_int=cond_phase_arr)
where = [(name in self.L1_names) for name in vln]
L1_indx = np.where(where)[0][0]
L1_arr = interp_vals[L1_indx]
L1_arr = interp_to_1D_arr(z_int=L1_arr)
theta_f_arr = self.proc_data_dict["x_int"]
lambda_2_arr = self.proc_data_dict["y_int"]
theta_f_arr, lambda_2_arr = interp_to_1D_arr(
x_int=theta_f_arr, y_int=lambda_2_arr
)
extract_optimals_from = "interpolated_values"
if self.find_local_optimals:
optimal_idxs, clusters_by_indx = get_optimal_pnts_indxs(
theta_f_arr=theta_f_arr,
lambda_2_arr=lambda_2_arr,
cond_phase_arr=cond_phase_arr,
L1_arr=L1_arr,
cost_arr=cost_func,
target_phase=self.target_cond_phase,
phase_thr=self.phase_thr,
L1_thr=self.L1_thr,
clustering_thr=self.clustering_thr,
sort_by_mode=self.sort_clusters_by,
)
else:
optimal_idxs = np.array([cost_func.argmin()])
clusters_by_indx = np.array([optimal_idxs])
if self.cluster_from_interp:
x_arr = theta_f_arr
y_arr = lambda_2_arr
else:
x_arr = self.proc_data_dict["x"]
y_arr = self.proc_data_dict["y"]
clusters_pnts_x = np.array([])
clusters_pnts_y = np.array([])
clusters_pnts_colors = np.array([])
for l, cluster_by_indx in enumerate(clusters_by_indx):
clusters_pnts_x = np.concatenate((clusters_pnts_x, x_arr[cluster_by_indx]))
clusters_pnts_y = np.concatenate((clusters_pnts_y, y_arr[cluster_by_indx]))
clusters_pnts_colors = np.concatenate(
(clusters_pnts_colors, np.full(np.shape(cluster_by_indx)[0], l))
)
self.proc_data_dict["optimal_idxs"] = optimal_idxs
self.proc_data_dict["clusters_pnts_x"] = clusters_pnts_x
self.proc_data_dict["clusters_pnts_y"] = clusters_pnts_y
self.proc_data_dict["clusters_pnts_colors"] = clusters_pnts_colors
self.proc_data_dict["x_optimal"] = x_arr[optimal_idxs]
self.proc_data_dict["y_optimal"] = y_arr[optimal_idxs]
optimal_pars_values = []
for x, y in zip(
self.proc_data_dict["x_optimal"], self.proc_data_dict["y_optimal"]
):
optimal_pars_values.append(
{self.proc_data_dict["xlabel"]: x, self.proc_data_dict["ylabel"]: y}
)
self.proc_data_dict["optimal_pars_values"] = optimal_pars_values
self.proc_data_dict["optimal_pars_units"] = {
self.proc_data_dict["xlabel"]: self.proc_data_dict["xunit"],
self.proc_data_dict["ylabel"]: self.proc_data_dict["yunit"],
}
optimal_measured_values = []
optimal_measured_units = []
mvs = self.proc_data_dict[extract_optimals_from]
for optimal_idx in optimal_idxs:
optimal_measured_values.append(
{name: np.ravel(mvs[ii])[optimal_idx] for ii, name in enumerate(vln)}
)
optimal_measured_units = {name: vlu[ii] for ii, name in enumerate(vln)}
self.proc_data_dict["optimal_measured_values"] = optimal_measured_values
self.proc_data_dict["optimal_measured_units"] = optimal_measured_units
if self.gen_optima_hulls:
self._proc_hulls()
if len(self.save_cond_phase_contours):
self._proc_cond_phase_contours(angle_thr=0.3)
self._proc_mv_along_contours()
if self.gen_optima_hulls:
self._proc_mv_along_contours_in_hulls()
# Save quantities of interest
save_these = {
"optimal_pars_values",
"optimal_pars_units",
"optimal_measured_values",
"optimal_measured_units",
"clusters_pnts_y",
"clusters_pnts_x",
"clusters_pnts_colors",
"hull_vertices",
"cond_phase_contours",
"cond_phase_contours_simplified",
}
pdd = self.proc_data_dict
quantities_of_interest = dict()
for save_this in save_these:
if save_this in pdd.keys():
if pdd[save_this] is not None:
quantities_of_interest[save_this] = pdd[save_this]
if bool(quantities_of_interest):
self.proc_data_dict["quantities_of_interest"] = quantities_of_interest
def _proc_hulls(self):
# Must be at the end of the main process_data
vln = self.proc_data_dict["value_names"]
interp_vals = self.proc_data_dict["interpolated_values"]
# where = [(name in self.cost_func_Names) for name in vln]
# cost_func_indxs = np.where(where)[0][0]
# cost_func = interp_vals[cost_func_indxs]
# cost_func = interp_to_1D_arr(z_int=cost_func)
where = [(name in self.cond_phase_names) for name in vln]
cond_phase_indx = np.where(where)[0][0]
cond_phase_arr = interp_vals[cond_phase_indx]
cond_phase_arr = interp_to_1D_arr(z_int=cond_phase_arr)
# Avoid runtime errors
cond_phase_arr[np.isnan(cond_phase_arr)] = 359.0
where = [(name in self.L1_names) for name in vln]
L1_indx = np.where(where)[0][0]
L1_arr = interp_vals[L1_indx]
L1_arr = interp_to_1D_arr(z_int=L1_arr)
# Avoid runtime errors
L1_arr[np.isnan(L1_arr)] = 100
x_int = self.proc_data_dict["x_int"]
y_int = self.proc_data_dict["y_int"]
x_int_reshaped, y_int_reshaped = interp_to_1D_arr(x_int=x_int, y_int=y_int)
sorted_hull_vertices = generate_optima_hull_vertices(
x_arr=x_int_reshaped,
y_arr=y_int_reshaped,
L1_arr=L1_arr,
cond_phase_arr=cond_phase_arr,
target_phase=self.target_cond_phase,
clustering_thr=self.hull_clustering_thr,
phase_thr=self.hull_phase_thr,
L1_thr=self.hull_L1_thr,
)
# We save this as a dictionary so that we don't have hdf5 issues
self.proc_data_dict["hull_vertices"] = {
str(h_i): hull_vertices
for h_i, hull_vertices in enumerate(sorted_hull_vertices)
}
log.debug("Hulls are sorted by increasing y value.")
def _proc_cond_phase_contours(self, angle_thr: float = 0.5):
"""
Increasing `angle_thr` makes the contours' paths coarser but simpler.
"""
# get the interpolated cond. phase data (if any)
vln = self.proc_data_dict["value_names"]
interp_vals = self.proc_data_dict["interpolated_values"]
x_int = self.proc_data_dict["x_int"]
y_int = self.proc_data_dict["y_int"]
where = [(name in self.cond_phase_names) for name in vln]
cond_phase_indx = np.where(where)[0][0]
cond_phase_int = interp_vals[cond_phase_indx]
c_dict = OrderedDict()
c_dict_orig = OrderedDict()
if len(cond_phase_int):
# use the contours function to generate them
levels_list = self.save_cond_phase_contours
contours = contour_overlay(
x_int,
y_int,
cond_phase_int,
contour_levels=levels_list,
cyclic_data=True,
vlim=(0, 360),
return_contours_only=True
)
for i, level in enumerate(levels_list):
# Just saving in a more friendly format
# Each entry in the `c_dict` is a dict of 2D arrays for
# disjoint contours for the same `level`
same_level_dict = OrderedDict()
same_level_dict_orig = OrderedDict()
for j, c in enumerate(contours[i]):
# To save in hdf5 several unpredictably shaped np.arrays
# we need a dictionary format here
# By convention we will make the contours start left to
# right on the x axis
if c[0][0] > c[-1][0]:
c = np.flip(c, axis=0)
same_level_dict_orig[str(j)] = c
same_level_dict[str(j)] = c2d.simplify_2D_path(c, angle_thr)
c_dict[str(level)] = same_level_dict
c_dict_orig[str(level)] = same_level_dict_orig
else:
log.debug("Conditional phase data for contours not found.")
self.proc_data_dict["cond_phase_contours_simplified"] = c_dict
self.proc_data_dict["cond_phase_contours"] = c_dict_orig
def _proc_mv_along_contours(self):
interpolators = self.proc_data_dict["interpolators"]
self.proc_data_dict["measured_values_along_contours"] = []
mvac = self.proc_data_dict["measured_values_along_contours"]
cpc = self.proc_data_dict["cond_phase_contours"]
for interp in interpolators:
mv_levels_dict = OrderedDict()
for level, cntrs_dict in cpc.items():
mv_cntrs_dict = OrderedDict()
for cntr_id, pnts in cntrs_dict.items():
scaled_pnts = interp.scale(pnts)
mv_cntrs_dict[cntr_id] = interp(*scaled_pnts.T)
mv_levels_dict[level] = mv_cntrs_dict
mvac.append(mv_levels_dict)
def _proc_mv_along_contours_in_hulls(self):
self.proc_data_dict["measured_values_along_contours_in_hulls"] = []
hvs = self.proc_data_dict["hull_vertices"]
mvach = self.proc_data_dict["measured_values_along_contours_in_hulls"]
cpc = self.proc_data_dict["cond_phase_contours"]
for i, mvac in enumerate(self.proc_data_dict["measured_values_along_contours"]):
hulls_dict = OrderedDict()
for hull_id, hv in hvs.items():
mv_levels_dict = OrderedDict()
for level, cntrs_dict in cpc.items():
mv_cntrs_dict = OrderedDict()
for cntr_id, pnts in cntrs_dict.items():
where = np.where(c2d.in_hull(pnts, hv))
# The empty entries are kept in here so that the color
# matching between plots can be achieved
mv_cntrs_dict[cntr_id] = {
"pnts": pnts[where],
"vals": mvac[level][cntr_id][where],
}
mv_levels_dict[level] = mv_cntrs_dict
hulls_dict[hull_id] = mv_levels_dict
mvach.append(hulls_dict)
def plot_text(self, pdict, axs):
"""
Helper function that adds text to a plot
Overriding here in order to make the text bigger
and put it below the the cost function figure
"""
pfunc = getattr(axs, pdict.get("func", "text"))
plot_text_string = pdict["text_string"]
plot_xpos = pdict.get("xpos", 0.98)
plot_ypos = pdict.get("ypos", 0.98)
fontsize = pdict.get("fontsize", 10)
verticalalignment = pdict.get("verticalalignment", "top")
horizontalalignment = pdict.get("horizontalalignment", "left")
fontdict = {
"horizontalalignment": horizontalalignment,
"verticalalignment": verticalalignment,
}
if fontsize is not None:
fontdict["fontsize"] = fontsize
# fancy box props is based on the matplotlib legend
box_props = pdict.get("box_props", "fancy")
if box_props == "fancy":
box_props = self.fancy_box_props
# pfunc is expected to be ax.text
pfunc(
x=plot_xpos,
y=plot_ypos,
s=plot_text_string,
transform=axs.transAxes,
bbox=box_props,
fontdict=fontdict,
)
def get_readable_optimals(
self,
optimal_pars_values=None,
optimal_measured_values=None,
optimal_start: int = 0,
optimal_end: int = np.inf,
sig_digits: int = 4,
opt_are_interp=None,
):
if not optimal_pars_values:
optimal_pars_values = self.proc_data_dict["optimal_pars_values"]
if not optimal_measured_values:
optimal_measured_values = self.proc_data_dict["optimal_measured_values"]
if opt_are_interp is None:
opt_are_interp = self._opt_are_interp
optimals_max = len(optimal_pars_values)
string = ""
for opt_idx in range(optimal_start, int(min(optimal_end + 1, optimals_max))):
string += "========================\n"
string += "Optimal #{}\n".format(opt_idx)
string += "========================\n"
for pv_name, pv_value in optimal_pars_values[opt_idx].items():
string += "{} = {:.{sig_digits}g} {}\n".format(
pv_name,
pv_value,
self.proc_data_dict["optimal_pars_units"][pv_name],
sig_digits=sig_digits,
)
string += "------------\n"
if (
self.cluster_from_interp
and opt_are_interp
and optimal_pars_values is self.proc_data_dict["optimal_pars_values"]
):
string += "[!!! Interpolated values !!!]\n"
for mv_name, mv_value in optimal_measured_values[opt_idx].items():
string += "{} = {:.{sig_digits}g} {}\n".format(
mv_name,
mv_value,
self.proc_data_dict["optimal_measured_units"][mv_name],
sig_digits=sig_digits,
)
return string
def get_optimal_pnts_indxs(
theta_f_arr,
lambda_2_arr,
cond_phase_arr,
L1_arr,
cost_arr,
target_phase=180,
phase_thr=5,
L1_thr=0.3,
clustering_thr=10 / 360,
tolerances=[1, 2, 3],
sort_by_mode="cost",
):
"""
target_phase and low L1 need to roughly match the cost function's minima
Args:
target_phase: unit = deg
phase_thr: unit = deg, only points whose cond. phase deviates from
target_phase by less than this threshold will be used for clustering
L1_thr: unit = %, only points with leakage below this threshold
will be used for clustering
clustering_thr: unit = deg, represents distance between points on the
landscape (lambda_2 gets normalized to [0, 360])
tolerances: phase_thr and L1_thr will be multiplied by the values in
this list successively if no points are found for the first element
in the list
"""
x = np.array(theta_f_arr)
y = np.array(lambda_2_arr)
# Normalize distance
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
x_norm = (x - x_min) / (x_max - x_min)
y_norm = (y - y_min) / (y_max - y_min)
# Select points based on low leakage and on how close to the
# target_phase they are
for tol in tolerances:
phase_thr *= tol
L1_thr *= tol
cond_phase_dev_f = multi_targets_phase_offset(target_phase, 2 * target_phase)
# np.abs(cond_phase_arr - target_phase)
cond_phase_abs_diff = cond_phase_dev_f(cond_phase_arr)
sel = cond_phase_abs_diff <= phase_thr
sel = sel * (L1_arr <= L1_thr)
# sel = sel * (x_norm > y_norm)
# Exclude points on the boundaries of the entire landscape
# This is because of some interpolation problems
sel = (
sel * (x < np.max(x)) * (x > np.min(x)) * (y < np.max(y)) * (y > np.min(y))
)
selected_points_indxs = np.where(sel)[0]
if np.size(selected_points_indxs) == 0:
log.warning(
"No optimal points found with |target_phase - cond phase| < {} and L1 < {}.".format(
phase_thr, L1_thr
)
)
if tol == tolerances[-1]:
log.warning("No optima found giving up.")
return np.array([], dtype=int), np.array([], dtype=int)
log.warning(
"Increasing tolerance for phase_thr and L1 to x{}.".format(tol + 1)
)
elif np.size(selected_points_indxs) == 1:
return np.array(selected_points_indxs), np.array([selected_points_indxs])
else:
x_filt = x_norm[selected_points_indxs]
y_filt = y_norm[selected_points_indxs]
break
# Cluster points based on distance
x_y_filt = np.transpose([x_filt, y_filt])
clusters = hcluster.fclusterdata(x_y_filt, clustering_thr, criterion="distance")
# Sorting the clusters
cluster_id_min = np.min(clusters)
cluster_id_max = np.max(clusters)
clusters_by_indx = []
optimal_idxs = []
av_L1 = []
# av_cp_diff = []
# neighbors_num = []
if sort_by_mode == "cost":
# Iterate over all clusters and calculate the metrics we want
for cluster_id in range(cluster_id_min, cluster_id_max + 1):
cluster_indxs = np.where(clusters == cluster_id)
indxs_in_orig_array = selected_points_indxs[cluster_indxs]
min_cost_idx = np.argmin(cost_arr[indxs_in_orig_array])
optimal_idx = indxs_in_orig_array[min_cost_idx]
optimal_idxs.append(optimal_idx)
clusters_by_indx.append(indxs_in_orig_array)
# Low cost function is considered the most interesting optimum
sort_by = cost_arr[optimal_idxs]
if np.any(np.array(sort_by) != np.sort(sort_by)):
log.debug(" Optimal points rescored based on cost function.")
elif sort_by_mode == "L1_av_around":
# Iterate over all clusters and calculate the metrics we want
for cluster_id in range(cluster_id_min, cluster_id_max + 1):
cluster_indxs = np.where(clusters == cluster_id)
indxs_in_orig_array = selected_points_indxs[cluster_indxs]
L1_av_around = [
av_around(x_norm, y_norm, L1_arr, idx, clustering_thr * 1.5)[0]
for idx in indxs_in_orig_array
]
min_idx = np.argmin(L1_av_around)
optimal_idx = indxs_in_orig_array[min_idx]
optimal_idxs.append(optimal_idx)
clusters_by_indx.append(indxs_in_orig_array)
# sq_dist = (x_norm - x_norm[optimal_idx])**2 + (y_norm - y_norm[optimal_idx])**2
# neighbors_indx = np.where(sq_dist <= (clustering_thr * 1.5)**2)
# neighbors_num.append(np.size(neighbors_indx))
# av_cp_diff.append(np.average(cond_phase_abs_diff[neighbors_indx]))
# av_L1.append(np.average(L1_arr[neighbors_indx]))
av_L1.append(L1_av_around[min_idx])
# Here I tried different strategies for scoring the local optima
# For landscapes that didn't look very regular
# low leakage is best
w1 = (
np.array(av_L1)
/ np.max(av_L1)
/ # normalize to maximum leakage
# and consider bigger clusters more interesting
np.array([it for it in map(np.size, clusters_by_indx)])
)
# Value points with more neighbors more highly, as confirmation of a
# low-leakage area; this also gives a lower score to points near the
# boundaries of the sampling area
# w2 = (1 - np.flip(np.array(neighbors_num) / np.max(neighbors_num)))
# Very few points will actually be precisely on the target phase contour
# Therefore not used
# low phase diff is best
# w3 = np.array(av_cp_diff) / np.max(av_cp_diff)
sort_by = w1 # + w2 + w3
if np.any(np.array(sort_by) != np.sort(sort_by)):
log.debug(" Optimal points rescored based on low leakage areas.")
optimal_idxs = np.array(optimal_idxs)[np.argsort(sort_by)]
clusters_by_indx = np.array(clusters_by_indx)[np.argsort(sort_by)]
return optimal_idxs, clusters_by_indx
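# Toy illustration of the clustering step used above (a sketch, assuming scipy
# is available; the numbers are made up):
#   import numpy as np
#   import scipy.cluster.hierarchy as hcluster
#   pnts = np.array([[0.00, 0.00], [0.01, 0.01], [0.50, 0.50]])
#   hcluster.fclusterdata(pnts, 0.1, criterion="distance")
#   # -> e.g. array([1, 1, 2]): the two nearby points share a cluster id,
#   #    which is how nearby optima get grouped before picking one per cluster.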
def generate_optima_hull_vertices(
x_arr,
y_arr,
cond_phase_arr,
L1_arr,
target_phase=180,
phase_thr=5,
L1_thr=np.inf,
clustering_thr=0.1,
tolerances=[1, 2, 3],
):
"""
Generates convex-hull vertices around clusters of near-optimal points.
Args:
target_phase: unit = deg
phase_thr: unit = deg, only points whose cond. phase deviates from
target_phase by less than this threshold will be used for clustering
L1_thr: unit = %, only points with leakage below this threshold
will be used for clustering
clustering_thr: unit = deg, represents distance between points on the
landscape (lambda_2 gets normalized to [0, 360])
tolerances: phase_thr and L1_thr will be multiplied by the values in
this list successively if no points are found for the first element
in the list
"""
x = np.array(x_arr)
y = np.array(y_arr)
# Normalize distance
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
x_norm = (x - x_min) / (x_max - x_min)
y_norm = (y - y_min) / (y_max - y_min)
# Select points based on low leakage and on how close to the
# target_phase they are
for tol in tolerances:
phase_thr *= tol
L1_thr *= tol
cond_phase_dev_f = multi_targets_phase_offset(target_phase, 2 * target_phase)
cond_phase_abs_diff = cond_phase_dev_f(cond_phase_arr)
sel = cond_phase_abs_diff <= phase_thr
sel = sel * (L1_arr <= L1_thr)
selected_points_indxs = np.where(sel)[0]
if np.size(selected_points_indxs) == 0:
log.warning(
"No optimal points found with |target_phase - cond phase| < {} and L1 < {}.".format(
phase_thr, L1_thr
)
)
if tol == tolerances[-1]:
log.warning("No optima found giving up.")
return []
log.warning(
"Increasing tolerance for phase_thr and L1 to x{}.".format(tol + 1)
)
else:
x_filt = x_norm[selected_points_indxs]
y_filt = y_norm[selected_points_indxs]
break
# Cluster points based on distance
x_y_filt = np.transpose([x_filt, y_filt])
clusters = hcluster.fclusterdata(x_y_filt, clustering_thr, criterion="distance")
# Sorting the clusters
cluster_id_min = np.min(clusters)
cluster_id_max = np.max(clusters)
clusters_by_indx = []
sort_by_idx = []
# Iterate over all clusters and calculate the metrics we want
for cluster_id in range(cluster_id_min, cluster_id_max + 1):
cluster_indxs = np.where(clusters == cluster_id)
indxs_in_orig_array = selected_points_indxs[cluster_indxs]
clusters_by_indx.append(indxs_in_orig_array)
min_sort_idx = np.argmin(y[indxs_in_orig_array])
optimal_idx = indxs_in_orig_array[min_sort_idx]
sort_by_idx.append(optimal_idx)
# Clusters are sorted by the lowest y value found in each cluster (increasing)
sort_by = y[sort_by_idx]
if np.any(np.array(sort_by) != np.sort(sort_by)):
log.debug(" Optimal points rescored.")
# optimal_idxs = np.array(optimal_idxs)[np.argsort(sort_by)]
clusters_by_indx = np.array(clusters_by_indx)[np.argsort(sort_by)]
x_y = np.transpose([x, y])
sorted_hull_vertices = []
# Generate the list of vertices for each optimal hull
for cluster_by_indx in clusters_by_indx:
pnts_for_hull = x_y[cluster_by_indx]
try:
hull = ConvexHull(pnts_for_hull)
vertices = hull.points[hull.vertices]
angle_thr = 5.0
# Remove redundant points that deviate little from a straight line
simplified_hull = c2d.simplify_2D_path(vertices, angle_thr)
sorted_hull_vertices.append(simplified_hull)
except Exception as e:
# There might not be enough points for a hull
log.debug(e)
return sorted_hull_vertices
def av_around(x, y, z, idx, radius):
sq_dist = (x - x[idx]) ** 2 + (y - y[idx]) ** 2
neighbors_indx = np.where(sq_dist <= radius ** 2)
return np.average(z[neighbors_indx]), neighbors_indx
def interp_to_1D_arr(x_int=None, y_int=None, z_int=None, slice_above_len=None):
"""
Turns interpolated heatmaps into linear 1D array
Intended for data reshaping for get_optimal_pnts_indxs
"""
if slice_above_len is not None:
if x_int is not None:
size = np.size(x_int)
slice_step = int(np.ceil(size / slice_above_len))
x_int = np.array(x_int)[::slice_step]
if y_int is not None:
size = np.size(y_int)
slice_step = int(np.ceil(size / slice_above_len))
y_int = np.array(y_int)[::slice_step]
if z_int is not None:
size_0 = np.shape(z_int)[0]
size_1 = np.shape(z_int)[1]
slice_step_0 = int(np.ceil(size_0 / slice_above_len))
slice_step_1 = int(np.ceil(size_1 / slice_above_len))
z_int = np.array(z_int)[::slice_step_0, ::slice_step_1]
if x_int is not None and y_int is not None and z_int is not None:
x_int_1D = np.ravel(np.repeat([x_int], np.size(y_int), axis=0))
y_int_1D = np.ravel(np.repeat([y_int], np.size(x_int), axis=1))
z_int_1D = np.ravel(z_int)
return x_int_1D, y_int_1D, z_int_1D
elif z_int is not None:
z_int_1D = np.ravel(z_int)
return z_int_1D
elif x_int is not None and y_int is not None:
x_int_1D = np.ravel(np.repeat([x_int], np.size(y_int), axis=0))
y_int_1D = np.ravel(np.repeat([y_int], np.size(x_int), axis=1))
return x_int_1D, y_int_1D
else:
return None
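# Sketch of the reshaping above on a tiny grid (illustrative only):
#   interp_to_1D_arr(x_int=[1, 2], y_int=[10, 20])
#   # -> x_1D = [1, 2, 1, 2], y_1D = [10, 10, 20, 20]
# i.e. the flattened coordinates of a row-major meshgrid, which matches the
# np.ravel flattening applied to z_int.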
|
mit
|
mraspaud/dask
|
dask/array/tests/test_percentiles.py
|
1
|
1897
|
import pytest
pytest.importorskip('numpy')
from dask.array.utils import assert_eq
import dask.array as da
import numpy as np
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def test_percentile():
d = da.ones((16,), chunks=(4,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([1, 1, 1], dtype=d.dtype))
x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
d = da.from_array(x, chunks=(3,))
result = da.percentile(d, [0, 50, 100])
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([0, 5, 20], dtype=result.dtype))
assert same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50, 100]))
assert not same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50]))
x = np.array(['a', 'a', 'd', 'd', 'd', 'e'])
d = da.from_array(x, chunks=(3,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array(['a', 'd', 'e'], dtype=x.dtype))
@pytest.mark.skip
def test_percentile_with_categoricals():
try:
import pandas as pd
except ImportError:
return
x0 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
x1 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
dsk = {('x', 0): x0, ('x', 1): x1}
x = da.Array(dsk, 'x', chunks=((6, 6),))
p = da.percentile(x, [50])
assert (p.compute().categories == x0.categories).all()
assert (p.compute().codes == [0]).all()
assert same_keys(da.percentile(x, [50]),
da.percentile(x, [50]))
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert_eq(da.percentile(x, [10, 50, 90]), np.array([1, 1, 1], dtype=x.dtype))
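# Note: chunks=((5, 0, 5),) deliberately creates an empty middle chunk; the
# test exercises da.percentile on such degenerate (zero-length) chunks.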
|
bsd-3-clause
|
nmayorov/scikit-learn
|
sklearn/metrics/cluster/tests/test_bicluster.py
|
394
|
1770
|
"""Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
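# Worked example for the 1.0 / 7 case (a1 vs a3): each bicluster covers a 2x2
# block of cells (rows {0, 1} x cols {0, 1} versus rows {1, 2} x cols {1, 2});
# the blocks overlap in exactly one cell, so the Jaccard index is
# |intersection| / |union| = 1 / (4 + 4 - 1) = 1 / 7.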
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
|
bsd-3-clause
|
DailyActie/Surrogate-Model
|
examples/sklearn_glm.py
|
1
|
1487
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <[email protected]>
# License: MIT License
# Create: 2016-12-02
from sklearn.linear_model import Lars
# X = [[0., 0.], [1., 1.], [10., 10.]]
X = [[0.0], [1.0], [10.0]]
y = [0.0, 1.0, 10.0]
# x_preb = [[5., 5.], [-10., -10.]]
x_preb = [[5.], [-10.]]
clf = Lars(n_nonzero_coefs=1)
clf.fit(X, y)
print(clf.coef_)
y_pred = clf.predict(x_preb)
print(y_pred)
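# For this toy data the relationship is exactly y = x, so the single LARS
# coefficient should come out close to 1.0 and y_pred close to [5., -10.]
# (an illustrative expectation, not an asserted result).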
|
mit
|
zhouzhaoze/dip
|
project2/Proj03-02/histogram_equalization.py
|
1
|
1283
|
#!/bin/python
# *-* encoding=utf-8 *-*
'''
Histogram Equalization of a Grayscale Image
'''
import numpy, scipy
from scipy import ndimage
from scipy import misc
import matplotlib.pyplot as plt
def compute_historgram(img):
hstg = {}
for i in range(img.shape[0]):
for j in range(img.shape[1]):
c = hstg.get(img[i][j], 0)
hstg[img[i][j]] = c + 1
return hstg
def histogram_equalization(img):
L = 256
M, N = img.shape[0], img.shape[1]
hstg = compute_historgram(img)
T = {}
s = 0.0
for gray_level in range(256):
s += hstg.get(gray_level, 0)
T[gray_level] = (L-1) * s / (M * N)
return process_image_point_wise(img, T)
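# The mapping built above is the standard histogram-equalization transform:
#   T[k] = (L - 1) / (M * N) * sum_{j <= k} h(j)
# i.e. the scaled cumulative histogram, so frequently occurring gray levels
# are spread over a wider range of output levels.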
def process_image_point_wise(img, gray_level_map):
process_img = img.copy()
for i in range(img.shape[0]):
for j in range(img.shape[1]):
process_img[i][j] = gray_level_map[img[i][j]]
return process_img
def main():
img_file = 'Fig0308(a)(fractured_spine).tif'
img = misc.imread(img_file)
# taks (a)
hstg = compute_historgram(img)
for key in hstg.keys():
print key, hstg[key]
# task (b)
htsg_img = histogram_equalization(img)
misc.imsave('hstg_%s' % img_file, htsg_img)
if __name__ == '__main__':
main()
|
apache-2.0
|
JesusTorrado/cosmo_mini_toolbox
|
src/Likelihood_Planck.py
|
1
|
15530
|
import os
import sys
import numpy as np
from collections import OrderedDict as odict
import matplotlib.pyplot as plt
# Internal
from CMBspectrum import CMBspectrum
try:
import clik
except ImportError:
raise ImportError("The likelihood code seems not to have been installed "+
"in your system.")
class Likelihood_Planck():
"""
Class for calculating log-likelihoods.
Once initialised, the method 'Likelihood_Planck.get_loglik(spectrum)' can be
called any number of times for different 'spectrum' (instances of 'CMBspectrum').
Mandatory arguments:
--------------------
base_folder: str
Folder in which the likelihood data folders/files are found.
Optional arguments:
-------------------
likelihoods: list of elements from ["commander", "camspec", "lowlike"]
Names of the likelihoods to be computed.
"""
def __init__(self, base_folder=None, likelihoods=None):
fullnames = odict([["commander", "commander_v4.1_lm49.clik"],
["camspec", "CAMspec_v6.2TN_2013_02_26_dist.clik"],
["lowlike", "lowlike_v222.clik"]])
likelihoods_fullnames = []
if likelihoods:
for lik in likelihoods:
if lik.lower() in fullnames:
likelihoods_fullnames.append(fullnames[lik.lower()])
else:
raise ValueError("Likelihood name not recognised: %s.\n"%lik+
"Valid ones are "+str(fullnames.keys()))
self._likelihoods_names = likelihoods_fullnames
else:
self._likelihoods_names = fullnames.values()
# Initialize!
self._likelihoods = odict()
for lik in self._likelihoods_names:
full_path = os.path.join(base_folder, lik)
try:
self._likelihoods[lik] = clik.clik(full_path)
except clik.lkl.CError:
raise ValueError("'clik' failed to initialise the requested "+
"likelihood %s"%lik+", probably because it was"+
" not found on the given folder: '%s'"%full_path)
# Get nuisance parameters
self._nuisance_parameters = dict([lik_name,{}]
for lik_name in self._likelihoods_names)
for lik in self._likelihoods_names:
names = self._likelihoods[lik].extra_parameter_names
self._nuisance_parameters[lik] = ({} if not names else
odict([[pname,None] for pname in names]))
# Interface methods #########################################################
def set_nuisance(self, n_dict=None, n_file=None):
"""
Set the value of the nuisance parameters.
Specify a dictionary via "n_dict" as "nuisance['param']=value"
or a file name which contains the parameter values in different lines as
'param = value'.
"""
assert (n_dict or n_file) and not (n_dict and n_file), \
("A dictionary of values as 'n_dict={...}' OR a file name as "
"'n_file=...' must be specified.")
if n_file:
nuisance_dict = {}
try:
nui = open(n_file, "r")
except IOError:
raise IOError("Nuisance parameters file not found: "+n_file)
err_par = "Some parameter definition is not correctly formatted: "
for line in nui:
if line.strip() and line.strip()[0] != "#":
aux = [a.strip() for a in line.split()]
assert aux[1] == "=", (
"Some parameter definition is not correctly formatted:"+
" line: '%s'")
par, val = aux[0], aux[2]
try:
val = float(val)
except ValueError:
raise ValueError("Some parameter definition is not correctly formatted:"+
" line: '%s'")
nuisance_dict[par] = val
if n_dict:
nuisance_dict = n_dict
# Both cases, fill values
for lik in self._likelihoods_names:
for p in self._nuisance_parameters[lik]:
try:
self._nuisance_parameters[lik][p] = nuisance_dict[p]
except KeyError:
raise KeyError("Nuisance parameter '%s' not defined!"%p)
def get_loglik(self, spectrum, verbose=False):
"""
Returns a dictionary containing the contribution to the log-likelihood
of each of the likelihoods requested.
A summary of the information can be printed on screen using the keyword
'verbose=True'.
"""
spectrum_prepared = self._prepare_spectrum(spectrum)
return self._get_loglik_internal(spectrum_prepared, verbose=verbose)
def compare_loglik(self, test_CMBspectrum, reference_CMBspectrum,
# Main arguments
delta_l=20, accumulated=False,
format="-loglik", save_file=None,
verbose=False,
# Fine tuning arguments
black_and_white=False,
ticks_fontsize=10, labels_fontsize=14,
transparent=False, dpi=150):
"""
Prints a comparison of the log-likelihood of two spectra along
multipoles.
WARNING: Since different multipoles are correlated, and this routine
works by substituting the value of the spectrum at small sets of
multipoles, the resulting value of the log-likelihood is in
general inexact.
Mandatory arguments:
--------------------
test_CMBspectrum: instance of CMBspectrum
Model CMB spectrum to be tested.
reference_CMBspectrum: instance of CMBspectrum
Fiducial CMB spectrum against which to test the test model.
Main arguments:
---------------
delta_l: int (default: 20)
Size of the bins where the diff. of log-likelihood is calculated.
If the correlation between multipoles is high, small values are
discouraged.
accumulated: bool (default: False)
Plot the accumulated difference for growing multipoles.
It adds a legend to distinguish between local and accumulated.
format: str (default: "-loglik")
Quantity to plot
* '-loglik' for the log likelihood
* 'chisq' for the chi squared, i.e., 2*(-loglik)
save_file: str (default: None)
If defined, instead of showing the plot, it is saved into the given
file name.
verbose: bool (default: False)
If True, a summary of the progress is printed on screen.
Fine tuning parameters:
-----------------------
black_and_white: bool (default: False)
If True, prints a version of the plot prepared for B&W printing.
ticks_fontsize=10, labels_fontsize=14
Fontsize of the axes ticks and labels
transparent: bool (default: False)
Transparency of the background of the plot.
dpi: int (default: 150)
Resolution used if the plot is saved to a file
"""
# Prepare both spectra
test_prepared = self._prepare_spectrum(test_CMBspectrum)
reference_prepared = self._prepare_spectrum(reference_CMBspectrum)
        # Go along the multipoles and get the likelihood
reference_loglik = \
self._get_loglik_internal(reference_prepared, verbose=False)
l_max = dict([lik, max(self._likelihoods[lik].lmax)]
for lik in self._likelihoods_names)
n_cls = dict([name, len([int(i) for i in lik.has_cl if int(i)])]
for name, lik in self._likelihoods.items())
l_initials = np.arange(0, 1+max(l_max.values()), delta_l)
l_finals = [l_initials[i+1]-1 for i in range(len(l_initials[:-1]))]
l_finals += [max(l_max.values())]
l_intervals = zip(l_initials, l_finals)
l_midpoints = np.array([(ini+fin)/2. for ini, fin in l_intervals])
if verbose:
print ("Calculating likelihood differences along multipoles " +
"with Delta_l = %d"%delta_l)
print "Progress: l =",
sys.stdout.flush()
loglik_differences = []
for (l_ini, l_fin) in l_intervals:
test_step = dict([lik, np.copy(reference_prepared[lik])]
for lik in reference_prepared)
if verbose:
print "[%d, %d] "%(l_ini, l_fin),
sys.stdout.flush()
this_loglik = {}
for lik in self._likelihoods_names:
# Don't calculate the likelihood more times than necessary
if l_ini > l_max[lik]:
this_loglik[lik] = reference_loglik[lik]
continue
for i_cl in range(n_cls[lik]):
test_step[lik][i_cl*(1+l_max[lik])+l_ini:
i_cl*(1+l_max[lik])+l_fin+1] = \
np.copy(
test_prepared[lik][i_cl*(1+l_max[lik])+l_ini:
i_cl*(1+l_max[lik])+l_fin+1])
this_loglik[lik] = self._get_loglik_internal(test_step,
only=[lik])[lik]
loglik_differences.append(dict([lik, (reference_loglik[lik]
-this_loglik[lik])]
for lik in reference_loglik))
if verbose:
print ""
# Prepare for plotting. Necessary but stupid hack: flattening
factor = 2 if format=="chisq" else 1
total_differences = np.array([factor*sum(v for v in ll.values())
for ll in loglik_differences]
).flatten()
accum_differences = np.array([sum(total_differences[:i+1])
for i in range(len(total_differences))]
).flatten()
# Prepare plot
fig = plt.figure()
ax = fig.add_subplot(111)
opts = [{"color": "blue", "linestyle":"-"},
{"color": "red", "linestyle":"-"}]
if black_and_white:
opts[0]["color"] = "gray"
opts[1]["color"] = "black"
# Not necessary when using filling
#ax.axhline(0, linestyle=':', color='grey')
ax.plot(l_midpoints, total_differences, label="Local", **opts[0])
ax.fill_between(l_midpoints, total_differences[:],
color=opts[0]["color"], alpha=0.40, linewidth=0)
if accumulated:
ax.plot(l_midpoints, accum_differences, label="Accumulated",
**opts[1])
# Plot legend only if accumulated plot
leg = ax.legend(loc=3, fancybox=True, prop={'size':10})
leg.get_frame().set_alpha(float(0.8))
# Format
ax.set_xlabel(r"$\mathrm{Multipole,}\,\ell$", fontsize=labels_fontsize)
ax.set_ylabel(r"$-%s \Delta\ln\mathcal{L}$"%(
"2\," if format=="chisq" else ""), fontsize=labels_fontsize)
leg = ax.legend(loc=3, fancybox=True, prop={'size':10})
plt.setp(ax.get_yticklabels(), fontsize=ticks_fontsize)
plt.setp(ax.get_xticklabels(), fontsize=ticks_fontsize)
# Transparency
fig.frameon = False
leg.get_frame().set_alpha(float(0.8))
# Plot
if not save_file:
plt.show()
else:
            plt.savefig(save_file, transparent=transparent, dpi=int(dpi),
                        bbox_inches='tight', pad_inches=0.1)
plt.close()
return l_intervals, total_differences, accum_differences
# Internal methods ##########################################################
def _prepare_spectrum(self, spectrum):
"""
Given a 'CMBspectrum' instance, prepares a dictionary of the spectra
        required by each likelihood, in the correct format to be fed directly
to 'clik'.
The output is to be passed to 'Likelihood_Planck._get_loglik_internal()'.
"""
# Check that the input is correct
assert isinstance(spectrum, CMBspectrum), \
"The spectrum provided must be an instance of 'CMBspectrum'."
# Check that nuisance parameters are defined (if one is, all are)
for lik in self._likelihoods_names:
if self._nuisance_parameters[lik]:
assert self._nuisance_parameters[lik].values()[0], (
"Nuisance parameters not yet defined! Set their values using "+
"'Likelihoods.set_nuisance()'.")
# Format of Clik : TT EE BB TE TB EB ( l = 0, 1, 2, ... !!!)
l = list(spectrum.ll())
pre = range(int(l[0]))
l = np.array(pre + l)
prepared = np.zeros([len(l), 6])
# NOTICE that this sets C_0 = C_1 = 0
prepared[2:, 0] = spectrum.lCl("TT", units="muK", l_prefactor=False)
prepared[2:, 1] = spectrum.lCl("EE", units="muK", l_prefactor=False)
prepared[2:, 3] = spectrum.lCl("TE", units="muK", l_prefactor=False)
# Prepare the vectors for the likelihoods:
vectors = {}
for lik in self._likelihoods_names:
vectors[lik] = []
# Check enough multipoles
l_max = self._likelihoods[lik].lmax
assert len(l) >= max(l_max), (
"Not enought multipoles for likelihood "+
"'%s' : needs %d, got %d"%(lik, max(l_max), len(l)))
# Which spectra
which_cls = [int(i) for i in self._likelihoods[lik].has_cl]
for i, cli in enumerate(which_cls):
if cli:
vectors[lik] += prepared[:(1+l_max[i]), i].tolist()
# Nuisance
for par,val in self._nuisance_parameters[lik].items():
vectors[lik].append(val)
return vectors
def _get_loglik_internal(self, spectrum_prepared, only=None, verbose=False):
"""
Actually calculates the likelihood of a previously prepared spectrum,
i.e. the output of 'Likelihood_Planck._prepare_spectrum()'.
"""
likelihoods = self._likelihoods_names
        if only:
            for lik in only:
                assert lik in likelihoods, \
                    "Likelihood not recognised: '%s'" % lik
            likelihoods = only
loglik = {}
for lik in likelihoods:
if verbose:
print "*** Computing : "+lik
loglik[lik] = self._likelihoods[lik](spectrum_prepared[lik])
if verbose:
print "loglik = ",loglik[lik]
print "chi2eff = ",-2*loglik[lik]
suma = sum(a[0] for a in loglik.values())
if verbose:
print "*** TOTAL :"
print "loglik = ",suma
print "chi2eff = ",-2*suma
return loglik
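# Minimal usage sketch, not part of the original module. It assumes the class
# above is 'Likelihood_Planck' (as its docstrings indicate), that the 'clik'
# data folders live under a hypothetical path, and that a 'CMBspectrum'
# instance is prepared elsewhere; nuisance names and values are placeholders.
if __name__ == "__main__":
    planck_lik = Likelihood_Planck(base_folder="/path/to/planck/clik_data",
                                   likelihoods=["commander", "lowlike"])
    # Nuisance parameters must be set before any likelihood evaluation, either
    # from a dictionary or from a 'param = value' text file; every parameter
    # reported by clik's extra_parameter_names must be present.
    planck_lik.set_nuisance(n_dict={"A_placeholder": 1.0})
    # With a CMBspectrum instance 'spectrum' prepared by the caller:
    #     loglik = planck_lik.get_loglik(spectrum, verbose=True)
    #     chi2eff = -2 * sum(contrib[0] for contrib in loglik.values())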
|
gpl-3.0
|
tylerjereddy/py_sphere_Voronoi
|
docs/conf.py
|
2
|
8750
|
# -*- coding: utf-8 -*-
#
# py_sphere_Voronoi documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 09:58:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['numpy','scipy','scipy.spatial','scipy.spatial.distance','numpy.linalg','pandas','numpy.random']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(1, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
authors = u'Tyler Reddy, Ross Hemsley, Edd Edmondson, Nikolai Nowaczyk, Joe Pitt-Francis'
project = u'py_sphere_Voronoi'
copyright = u'2014-2015, ' + authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py_sphere_Voronoidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'py_sphere_Voronoi.tex', u'py\\_sphere\\_Voronoi Documentation',
u'Tyler Reddy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py_sphere_voronoi', u'py_sphere_Voronoi Documentation',
[u'Tyler Reddy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'py_sphere_Voronoi', u'py_sphere_Voronoi Documentation',
u'Tyler Reddy', 'py_sphere_Voronoi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
|
sunpy/solarbextrapolation
|
solarbextrapolation/extrapolators/base.py
|
3
|
7115
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:17:31 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
import pickle
import time
from datetime import datetime
from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
# Internal imports
#from solarbextrapolation.utilities import *
from solarbextrapolation.map3dclasses import Map3D
class Extrapolators(object):
"""
Common class for all 3D vector field extrapolation routines.
Each routine, created by building a subclass, will have wildly varying
    capabilities and input arguments, so this has been left intentionally
minimal.
The primary method to override is extrapolation(), the primary method to
call is extrapolate() which will both call extrapolation() and save the
result if a filepath argument is given.
Parameters
----------
map_magnetogram : `sunpy.map.GenericMap`
The sunpy map containing the boundary magnetogram data.
filepath : `string`
The optional filepath for automatic saving of extrapolation results.
notes : `string`
        The optional notes regarding this run of the extrapolation routine.
extrapolator_routine : `string`
The name for the extrapolation routine.
zshape : `int`
The vertical grid size.
    xrange : `astropy.units.Quantity`, optional
The x edge to edge coordinates. If defined will manually scale the
boundary data.
    yrange : `astropy.units.Quantity`, optional
The y edge to edge coordinates. If defined will manually scale the
boundary data.
    zrange : `astropy.units.Quantity`
The vertical edge to edge coordinates for the vertical range.
notes : `string`
User specified notes that will be added to the metadata.
"""
def __init__(self, map_magnetogram, **kwargs):
"""
Construct an extrapolator using the given 2D map.
"""
self.map_boundary_data = map_magnetogram
self.meta = { 'boundary_1_meta': self.map_boundary_data.meta }
self.meta['extrapolator_notes'] = kwargs.get('notes', '')
        # Normalise the units to SI; may possibly be added here
# Crop the boundary data if required.
self.xrange = kwargs.get('xrange', self.map_boundary_data.xrange)
self.yrange = kwargs.get('yrange', self.map_boundary_data.yrange)
self.map_boundary_data = self.map_boundary_data.submap(self.xrange, self.yrange)
self.xobsrange = self.map_boundary_data.xrange
self.yobsrange = self.map_boundary_data.yrange
#print '\n\nHelp for u:'
#print 'help(u): ' + str(help(u))
#print '\n\n'
self.zrange = kwargs.get('zrange', u.Quantity([0.0, 1.0] * u.Mm) )
self.shape = np.asarray([self.map_boundary_data.data.shape[1],
self.map_boundary_data.data.shape[0],
long(kwargs.get('zshape', 1L))])
self.filepath = kwargs.get('filepath', None)
self.routine = kwargs.get('extrapolator_routine', type(self))
def _angle_to_length(self, arc, **kwargs):
"""
Approximate a surface length from the observed arc length.
Uses the small angle approximation.
"""
r = self.map_boundary_data.dsun - self.map_boundary_data.rsun_meters
length = (r * arc.to(u.radian))
return length.to(u.m, equivalencies=u.dimensionless_angles())
def _to_SI(self, **kwargs):
"""
"""
# Scale the x/y ranges
        # Set up the equivalence
obs_distance = self.map_boundary_data.dsun - self.map_boundary_data.rsun_meters
radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ]
# Extract the maps x/yrange values and convert to length units
#x_range = self.map_boundary_data.xrange
#x_range = ( decompose_ang_len(x_range[0], equivalencies=radian_length),
# decompose_ang_len(x_range[1], equivalencies=radian_length) )
#x_range =
#y_range = self.map_boundary_data.yrange
"""
x_range = self.map_boundary_data.xrange.to(u.meter, equivalencies=radian_length)
y_range = self.map_boundary_data.yrange.to(u.meter, equivalencies=radian_length)
# Normalise to start at 0.0
x_range = [self.map_boundary_data.xrange[0] - self.map_boundary_data.xrange[0],
self.map_boundary_data.xrange[1] - self.map_boundary_data.xrange[0]]
y_range = [self.map_boundary_data.yrange[0] - self.map_boundary_data.yrange[0],
self.map_boundary_data.yrange[1] - self.map_boundary_data.yrange[0]]
"""
# Scale the magnetic field units
ori_bunit = u.Unit(self.map_boundary_data.meta.get('bunit', 'Tesla'))
scale_factor = ori_bunit.to(u.T)
self.map_boundary_data = self.map_boundary_data * scale_factor
self.map_boundary_data.meta['bunit'] = 'Tesla'
self.meta['boundary_1_meta']['bunit'] = 'Tesla'
def _extrapolation(self, **kwargs):
"""
The method for running an extrapolation routine.
This is the primary method to be edited in subclasses for specific
extrapolation routine implementations.
"""
# Add some type checking, we want a map object, check for .unit attribute.
# Extrapolation code goes here.
arr_4d = np.zeros([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[1], 1, 3])
# Calculate the ranges in each dimension in length units (meters)
x_range = self._angle_to_length(self.xrange)
y_range = self._angle_to_length(self.yrange)
z_range = self.zrange
# Turn the 4D array into a Map3D object.
map_output = Map3D( arr_4d, self.meta, xrange=x_range, yrange=y_range, zrange=z_range, xobsrange=self.xobsrange, yobsrange=self.yobsrange )
return map_output
def extrapolate(self, **kwargs):
"""
Method to be called to run the extrapolation.
Times and saves the extrapolation where applicable.
"""
# Record the time and duration of the extrapolation.
dt_start = datetime.now()
tim_start = time.time()
arr_output = self._extrapolation(**kwargs)
tim_duration = time.time() - tim_start
# Add the duration and time to the meta/header data.
arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
arr_output.meta['extrapolator_duration'] = tim_duration
arr_output.meta['extrapolator_duration_unit'] = u.s
        # Save the Map3D if a filepath has been set. (to avoid losing work)
if self.filepath:
arr_output.save(self.filepath)
return arr_output
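# Illustrative sketch, not part of the original module: a hypothetical
# subclass showing the intended extension pattern, i.e. override
# _extrapolation() and call extrapolate() on an instance. 'aMap2D' stands for
# any sunpy.map.GenericMap magnetogram supplied by the user.
class TrivialExtrapolator(Extrapolators):
    """ Returns a zero-filled vector field over the boundary map footprint. """
    def _extrapolation(self, **kwargs):
        arr_4d = np.zeros([self.map_boundary_data.data.shape[0],
                           self.map_boundary_data.data.shape[1], 1, 3])
        return Map3D(arr_4d, self.meta,
                     xrange=self._angle_to_length(self.xrange),
                     yrange=self._angle_to_length(self.yrange),
                     zrange=self.zrange,
                     xobsrange=self.xobsrange, yobsrange=self.yobsrange)

# Example call (assuming 'aMap2D' exists):
#     aMap3D = TrivialExtrapolator(aMap2D, zshape=10).extrapolate()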
|
mit
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/mpl_toolkits/tests/__init__.py
|
5
|
2335
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import difflib
import os
from matplotlib import rcParams, rcdefaults, use
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
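# Small illustrative check, not part of the original test package: it shows
# the diff-style ValueError produced by assert_str_equal when two strings
# differ in a single line.
if __name__ == "__main__":
    try:
        assert_str_equal("line one\nline two\n", "line one\nline 2\n")
    except ValueError as err:
        print(err)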
|
bsd-2-clause
|
rexshihaoren/scikit-learn
|
examples/cluster/plot_affinity_propagation.py
|
349
|
2304
|
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
mweisman/QGIS
|
python/plugins/processing/algs/MeanAndStdDevPlot.py
|
6
|
3372
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterTableField import ParameterTableField
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
MEAN_FIELD = 'MEAN_FIELD'
STDDEV_FIELD = 'STDDEV_FIELD'
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(uri)
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, namefieldname,
meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width, color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'),
)
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
def defineCharacteristics(self):
self.name = 'Mean and standard deviation plot'
self.group = 'Graphics'
self.addParameter(ParameterTable(self.INPUT, 'Input table'))
self.addParameter(ParameterTableField(self.NAME_FIELD,
'Category name field', self.INPUT,
ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD, 'Mean field',
self.INPUT))
        self.addParameter(ParameterTableField(self.STDDEV_FIELD,
                          'StdDev field', self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, 'Output'))
|
gpl-2.0
|
xhappy/selfMachineLearning
|
cs231n-assignment/assigment1/SVMTest.py
|
1
|
12598
|
from CifarDataUtil import CifarDataUtil
from SVM import SVM
from linear_classifier import LinearSVM
from gradient_check import *
import numpy as np
import matplotlib.pyplot as plt
import time
import math
def cross_validation (X_train, y_train, X_val, y_val):
#############################################################################################
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
#############################################################################################
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
iters = 2000 #100
for lr in learning_rates:
for rs in regularization_strengths:
svm = LinearSVM()
svm.train(X_train, y_train, learning_rate=lr, reg=rs, num_iters=iters)
y_train_pred = svm.predict(X_train)
acc_train = np.mean(y_train == y_train_pred)
y_val_pred = svm.predict(X_val)
acc_val = np.mean(y_val == y_val_pred)
results[(lr, rs)] = (acc_train, acc_val)
if best_val < acc_val:
best_val = acc_val
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
return results, best_svm
def visualizeCrossValidation(results):
# Visualize the cross-validation results
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
def testSetOnBestSVM(X_test, y_test, best_svm):
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
def visualizeWeightForAllClasses(best_svm):
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
plt.show()
if __name__ == '__main__':
#####################################################################
# Data Loading and Preprocessing:
#####################################################################
filePath = 'C:/Code/PythonCode/cases/source/cs231n/assignment/assignment1_2017/cs231n/datasets/cifar-10-batches-py'
cifarDataObj = CifarDataUtil(filePath)
X_train, y_train, X_test, y_test = cifarDataObj.loadData()
# reshape the data to be: 1000*32*32*3
X_train, X_test = cifarDataObj.reShapeDataAsImage(X_train, X_test)
print('X_train.shape: ', X_train.shape)
print('X_test.shape: ', X_test.shape)
print('----------')
#####################################################################
# Split the data into train, val, and test sets:
#####################################################################
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
print('---------')
#####################################################################
# Preprocessing: reshape the image data into rows:
#####################################################################
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
print('---------')
#####################################################################
# Preprocessing: subtract the mean image:
#####################################################################
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
print('------------')
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
ticNaive = time.time()
svmTrainObj = SVM(W, X_train, y_train, 0.000005)
loss, grad = svmTrainObj.svm_loss_naive()
tocNaive = time.time()
print('X_train.shape: ', X_train.shape, ' loss: ', loss, ' grad: ', grad, ' grad.shape: ', grad.shape)
ticVector = time.time()
loss2, grad2 = svmTrainObj.svm_loss_vectorized()
tocVector = time.time()
print('X_train.shape: ', X_train.shape, ' loss2: ', loss2, ' grad2: ', grad2, ' grad2.shape: ', grad2.shape)
print('Naive cost time: ', tocNaive-ticNaive)
print('Vector cost time: ', tocVector - ticVector)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
    # almost exactly along all dimensions.
svmDevObj1 = SVM(W, X_dev, y_dev, 0.0)
# Compute the loss and its gradient at W.
loss, grad = svmDevObj1.svm_loss_naive()
f = lambda w: svmDevObj1.svm_loss_naive()[0]
grad_numerical = grad_check_sparse(f, W, grad)
print('grad_numerical: ', grad_numerical)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
svmDevObj2 = SVM(W, X_dev, y_dev, 5e1)
loss, grad = svmDevObj2.svm_loss_naive()
f = lambda w: svmDevObj2.svm_loss_naive()[0]
grad_numerical = grad_check_sparse(f, W, grad)
print('grad_numerical: ', grad_numerical)
#####################
linearSVM = LinearSVM()
tic = time.time()
loss_hist = linearSVM.train(X_train, y_train, learning_rate = 1e-7, reg = 2.5e4, num_iters = 1500, verbose = True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = linearSVM.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = linearSVM.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
results, best_svm = cross_validation (X_train, y_train, X_val, y_val)
visualizeCrossValidation(results)
testSetOnBestSVM(X_test, y_test, best_svm)
visualizeWeightForAllClasses(best_svm)
|
mit
|
harrywy/cuda-convnet2
|
convdata.py
|
174
|
14675
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
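# Illustrative note, not part of the original file: given an ImageDataProvider
# 'dp' already constructed by convnet.py with its dp_params, a batch can be
# turned back into images for pylab roughly as follows (names are
# placeholders; shownet.py is the real consumer of this API):
#
#     epoch, batchnum, (data, labvec, labmat) = dp.get_next_batch()
#     imgs = dp.get_plottable_data(data)   # (numCases, inner_size, inner_size, 3)
#     pl.imshow(imgs[0], interpolation='nearest')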
|
apache-2.0
|
envhyf/wrftools
|
wrftools/nctools.py
|
1
|
11612
|
import string
import pandas as pd
import numpy as np
from netCDF4 import Dataset, MFDataset
from netCDF4 import num2date, date2num
import datetime
import loghelper
import json
LOGGER = "ncdump"
HGT2DNUM = 9999 # height to encode 2D variables as (must be numeric and not clash with real heights)
HGT2DSTR = "2D" # how 2D vars are specified in config file (can be string)
class UnknownFormat(Exception):
pass
class ConfigError(Exception):
pass
class DatasetError(Exception):
pass
def get_coordinate_vars(dataset, coords=None):
""" Gets coordinate variables associated with dimensions,
doing some conversion to character array and time units
Arguments:
dataset -- a NetCDF4 Dataset object
coords -- a list of variable names to treat as coordinates. If None, then
coordinate variables are selected based on dimension names"""
logger = loghelper.get(LOGGER)
logger.debug("get_coordinate_vars()")
dims = dataset.dimensions
vars = dataset.variables
ndims = len(dims)
# if coordinate variables are not specified, fetch all variables
# with the same name as dimensions (it they exist)
if not coords:
logger.debug("no coordinate variables given, finding automatically")
coords = [ d for d in dims if vars.get(d) ]
# package the result as a dictionary
result = {}
for c in coords:
cvar = vars[c]
if str(cvar.dtype)=="|S1":
result[c] = _char_array_to_str(cvar)
elif _is_time(cvar):
result[c] = num2date(cvar[:], units=cvar.units,calendar=cvar.calendar)
else:
result[c] = cvar[:]
return result
def melt(ncfiles, vars=None, global_atts=None,var_atts=None, coord_vars=None, missing=None):
""" Build a (molten) Pandas DataFrame from a series of netcdf files. This is a flexible, but very
    memory-inefficient data structure, so be careful calling this with large netcdf files.
Arguments:
ncfiles -- the input filenames
vars -- the variables to read, if None all variables in files read
var_atts -- variable attributes to include in each line of output, default all
global_atts -- global attributes to include in each row of output
coord_vars -- variables to treat as coordinates, if None will use variables with
the same name as dimensions"""
logger = loghelper.get_logger(LOGGER)
frames = []
if len(ncfiles)==1:
dataset = Dataset(ncfiles[0])
else:
dataset = MFDataset(ncfiles)
coord_vars = get_coordinate_vars(dataset, coord_vars)
variables = dataset.variables
# get global attributes in dataset
# shouldn't really use this, but it works
dataset_atts = dataset.__dict__
use_global_atts = _lookup(global_atts, dataset_atts, missing)
# if no vars specified, use all in ncfiles
if (vars==None or vars==["all"]): vars = list(variables.keys())
# variables are a function of var(reftime,leadtime,height,location)
# or var(reftime,leadtime,location)
usevars = [v for v in vars if v not in coord_vars]
logger.debug("usevars: %s" % usevars)
# There must be a clean way of doing this in a general
# way, but I don't have the time to code this properly,
# so I'm looping over fixed and hard-coded dimension names
location = coord_vars['location']
reftime = coord_vars['reftime']
leadtime = coord_vars['leadtime']
height = coord_vars['height']
#lat = coord_vars['lat']
#lon = coord_vars['lon']
nloc = len(location)
nreftime = len(reftime)
nleadtime = len(leadtime)
# dimension order is reftime, leadtime, location, height
# or reftime, leadtime, location
vars2D = [v for v in usevars if len(variables[v].shape)==3]
vars3D = [v for v in usevars if len(variables[v].shape)==4]
series = []
for v in vars2D:
vname = v
variable = variables[v]
use_var_atts = _lookup(var_atts, variable.__dict__, missing)
factors = [reftime, leadtime, [HGT2DNUM], location, [vname]] + map(_listify, use_global_atts.values()) + map(_listify,use_var_atts.values())
names = ['reftime', 'leadtime', 'height', 'location','variable'] + use_global_atts.keys() + use_var_atts.keys()
index = pd.MultiIndex.from_product(factors, names=names)
#index = pd.MultiIndex.from_tuples([(ref,lead,loc,HGT2DNUM,vname) for ref in reftime for lead in leadtime for loc in location], names=['reftime', 'leadtime', 'location', 'height','variable'])
if type(variable[:]) == np.ma.core.MaskedArray:
data = variable[:].flatten().filled(np.nan).astype(np.float)
else:
data = variable[:].flatten().astype(np.float)
series.append( pd.Series(data=data, index=index, name='value'))
for v in vars3D:
variable = variables[v]
vname = v
use_var_atts = _lookup(var_atts, variable.__dict__, missing)
for h,hgt in enumerate(height):
subvar = variable[:,:,:,h]
vname = "%s.%03d" % (v,hgt)
vname = v
factors = [reftime, leadtime, [hgt], location, [vname]] + map(_listify, use_global_atts.values()) + map(_listify,use_var_atts.values())
names = ['reftime', 'leadtime', 'height', 'location','variable'] + use_global_atts.keys() + use_var_atts.keys()
index = pd.MultiIndex.from_product(factors, names=names)
#index = pd.MultiIndex.from_tuples([(ref,lead,loc,hgt,vname) for ref in reftime for lead in leadtime for loc in location], names=['reftime', 'leadtime', 'location','height', 'variable'])
if type(subvar) == np.ma.core.MaskedArray:
data = subvar[:].flatten().filled(np.nan).astype(np.float)
else:
data = subvar[:].flatten().astype(np.float)
series.append(pd.Series(data=data, index=index, name='value'))
# this is molten data, to use Hadley Wickham's terminology
# or perhaps 5th normal form?
result = pd.concat(series, axis=0).reset_index()
return result
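# Added example (hedged sketch): melting forecast netcdf files into a molten
# DataFrame and filtering it; the file name and the 0-24h lead-time window are
# illustrative only.
def _example_melt_and_filter(ncfiles=("fcst_d01.nc",)):
    frame = melt(list(ncfiles), vars=["all"], global_atts=[], var_atts=[])
    # keep only the first 24 hours of lead time (see filter() further down)
    return filter(frame, {"leadtime": "0:24"})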
def _parse_filter(filter_str):
return map(float, filter_str.split(':'))
def filter(frame, rules):
""" Filters a data frame according to a set of rules specified in rules i.e.
'column name' : [list of values] |
'column name' : 'min:max' string giving numeric limits (inclusive)
Arguments:
frame -- the DataFrame to filter
rules -- rules to apply """
logger = loghelper.get(LOGGER)
logger.debug("%d rows before filtering" % len(frame))
logger.debug(json.dumps(rules, indent=4))
for column, rule in rules.items():
logger.debug("filtering on %s" % column)
# if a list of values is provided, filter on those
if type(rule)==list:
frame = frame[frame[column].isin(rule)]
# else if a 'min:max' string is provided, treat it as numeric limits
elif type(rule)==str:
min, max = _parse_filter(rule)
frame = frame[(frame[column]>=min) & (frame[column]<=max)]
else:
raise ConfigError("filter type not understood, please give a list of values or a 'min:max' string")
logger.debug("%d rows" % len(frame))
return frame
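# Added example (hedged sketch): a rules dict combining the two supported forms,
# a list of allowed values and a 'min:max' numeric range (column values illustrative).
def _example_filter(frame):
    rules = {"location": ["site_A", "site_B"],  # keep only these locations
             "leadtime": "0:24"}                # keep lead times between 0 and 24
    return filter(frame, rules)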
def _listify(element):
""" Ensures elements are contained in a list """
# either it is a list already, or it is None
if type(element)==type([]) or element==None:
return element
else:
return [element]
def concat(frame, cols, name=None, delim=".", formatter=None, drop=True, inplace=True):
if name==None: name = delim.join(cols)
if not inplace:frame = frame.copy()
vals = _concat_cols(frame, cols, delim=delim, formatter=formatter)
_drop_inplace(frame, cols)
frame[name] = vals
if not inplace: return frame
def _concat_cols(frame, cols, delim='.', formatter=None):
""" mashes two columns into one by concantenating their columns. Default applies
str to each column and separates with a dot
Arguments:
frame -- data frame to operate on
cols -- list of columns to join
delim -- delimiter used to separate the values
"""
logger = loghelper.get(LOGGER)
logger.warn("Peformance warning, string concatenation of columns not done very efficiently")
if len(cols)!=2:
raise NotYetImplemented("concatenating other than two columns is not yet implemented")
if formatter!=None:
if type(formatter)==type([]):
result = frame[cols[0]].apply(formatter[0]) + delim + frame[cols[1]].apply(formatter[1])
elif type(formatter)==type({}):
result = frame[cols[0]].apply(formatter[cols[0]]) + delim + frame[cols[1]].apply(formatter[cols[1]])
else:
result = frame[cols[0]].map(str) + delim + frame[cols[1]].map(str)
return result
def _lookup(keys, target, missing):
result = {}
for key in keys:
# the attribute really is there
if key in target:
result[key] = target[key]
# else it is missing, but a default has been provided
elif key in missing:
result[key] = missing[key]
# else we are screwed, and should probably raise an exception
else:
raise KeyError("attribute not found and no default missing value given")
return result
def _valid_time(reftime, leadtime):
refdt = num2date(reftime[:], units=reftime.units, calendar=reftime.calendar)
inittime = refdt[0]
if "hour" in leadtime.units:
delta = datetime.timedelta(0,60*60)
elif "minute" in leadtime.units:
delta = datetime.timedelta(0,60)
else:
raise FileInputError("leadtime units of %s not supported" % leadtime.units)
validtime = refdt[:] + (leadtime[:] * delta)
return validtime
def _char_array_to_str(chars):
"""Converts a NetCDF masked character array into an array of strings"""
logger = loghelper.get(LOGGER)
# assert we have two dimensions
assert(len(chars.shape)==2)
dim0 = chars.shape[0]
dim1 = chars.shape[1]
# if it is a masked array, replace masked with blanks
if hasattr(chars[:], 'mask'):
# first fill in masked elements with blanks
filled = chars[:].filled(' ')
else:
filled = chars
# join character arrays across last dimension
strs = [''.join(filled[n,:]) for n in range(dim0) ]
# then strip away the blanks
strs = map(string.strip, strs)
# return as an array of strings
return np.array(strs)
def _is_time(variable):
"""Determines whether a NetCDF variable represents time"""
return hasattr(variable, 'calendar')
def _drop_inplace(frame, cols):
lcols = _listify(cols)
for c in lcols:
del(frame[c])
def _drop(frame, cols):
lcols = _listify(cols)
new_cols = [c for c in frame.columns if c not in lcols]
return frame[new_cols]
def _rename(frame, mapping):
new_names = [ mapping[c] if c in mapping else c for c in frame.columns]
frame.columns = new_names
return frame
|
gpl-3.0
|
cbgaindia/parsers
|
research/table_extraction_using_block_detection/code/image_to_block_feature_generator.py
|
1
|
10928
|
'''Classes that can consume images to generate blocks and their features
(textual and geometrical)
'''
from itertools import groupby
from collections import Counter
import re
import subprocess
import cv2
import pandas as pd
import numpy as np
class ImageToBlocks(object):
'''Convert Images to images with block like structures.
Args:
- img (obj:`numpy.Array`): A numpy array of the image.
- block_threshold (tuple:(int, int)): A tuple containing threshold params
namely vertical and horizontal for block generation.
'''
def __init__(self, img, block_threshold):
self.img = img
self.block_threshold = block_threshold
def generate_blocks(self):
'''Generate blocked image from images of pdf pages using custom implementation of Run
Length Smoothing Algorithm (RLSA)
Returns:
Image with blocked structures.
NOTE: This method is slow and will be removed in the coming iterations.
'''
_, thresh1 = cv2.threshold(self.img, 0, 1, cv2.THRESH_BINARY_INV)
img_iter = np.nditer(thresh1, flags=['multi_index'])
c_vertical, c_horizontal = self.block_threshold
temp_thresh = thresh1.copy()
while not img_iter.finished:
point_x, point_y = img_iter.multi_index
x_threshold = point_x + c_horizontal
y_threshold = point_y + c_vertical
neg_x_threshold = point_x - c_horizontal
neg_y_threshold = point_y - c_vertical
if (thresh1[point_x:x_threshold, point_y:y_threshold].any()
or thresh1[point_x:x_threshold, point_y:neg_y_threshold].any()
or thresh1[point_x:neg_x_threshold, point_y:y_threshold].any()
or thresh1[point_x:neg_x_threshold, point_y:neg_y_threshold].any()):
temp_thresh[point_x, point_y] = 1
else:
temp_thresh[point_x, point_y] = 0
img_iter.iternext()
return temp_thresh
def generate_blocks_dilation(self):
'''Generate blocked images from images of pdf pages using opencv's
dilate function.
https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html#dilation
'''
kernel = np.ones((5, 10), np.uint8)
_, thresh1 = cv2.threshold(self.img, 0, 1, cv2.THRESH_BINARY_INV)
return cv2.dilate(thresh1, kernel, iterations=5)
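# Added example (hedged sketch): reading a page image with OpenCV and producing a
# blocked image via dilation; the file name and thresholds are illustrative only.
def _example_generate_blocks(image_path="page_1.png"):
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    blocker = ImageToBlocks(img, block_threshold=(10, 20))
    return blocker.generate_blocks_dilation()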
class BlockGeometricalFeatureGenerator(ImageToBlocks):
'''Extract geometrical feature for each block in a dataframe.
Args:
- img (obj:`numpy.Array`): A numpy array of the image.
- block_threshold (tuple:(int, int)): A tuple containing threshold params
namely vertical and horizontal for block generation.
'''
def __init__(self, img, block_threshold, dilate=False):
self.dilate = dilate
self.img_with_blocks = None  # set by extract_block_stats
super(BlockGeometricalFeatureGenerator, self).__init__(img, block_threshold)
@staticmethod
def __get_block_stats_df(stats, centroids):
'''Convert stats from cv2.connectedComponentsWithStats to dataframe.
Args:
- stats (obj:`numpy.Array`): the stats generated from openCV
Returns:
A dataframe with stats
'''
stats_columns = ["left", "top", "width", "height", "area"]
block_stats = pd.DataFrame(stats, columns=stats_columns)
block_stats['centroid_x'], block_stats['centroid_y'] = centroids[:, 0], centroids[:, 1]
# Ignore the label 0 since it is the background
block_stats.drop(0, inplace=True)
return block_stats
def extract_block_stats(self):
'''Extract Geometrical features from img with blocks.
Returns:
A dataframe with each row as block and its geom features.
'''
if self.dilate:
self.img_with_blocks = self.generate_blocks_dilation()
else:
self.img_with_blocks = self.generate_blocks()
_, _, stats, centroids = cv2.connectedComponentsWithStats(self.img_with_blocks)
block_stats = self.__get_block_stats_df(stats, centroids)
block_stats['right'] = block_stats.left + block_stats.width
block_stats['bottom'] = block_stats.top + block_stats.height
block_stats['pos'] = block_stats.index
return block_stats
@staticmethod
def overlay_img_with_blocks(img, blocks):
'''A utility function to overlay blocked image over the actual image.
'''
raise NotImplementedError('To be Implemented in Version 0.2')
class BlockTextualFeatureGenerator(BlockGeometricalFeatureGenerator):
'''Extract Textual Features of each block.
Args:
- img (obj:`numpy.Array`): Matrix form of the image.
- horizontal_ratio (float): ratio of page_width and image_width.
- vertical_ratio (float): ratio of page_height and image_height.
- page_num (int): Page number from where to read the text.
- pdf_file_path (string): Path of the pdf file.
- block_threshold (tuple:(int, int)): A tuple containing threshold params
namely vertical and horizontal for block generation.
- post_processors (list:[functions]): A list of functions that can process
the blocks generated.
'''
TEXT_REGEX = '[a-zA-Z_]+'
COMMA_SEP_REGEX = '^(-|[1-9])[0-9]*(,[0-9]).*$'
def __init__(self, img, horizontal_ratio,
vertical_ratio, page_num,
pdf_file_path, block_threshold,
post_processors=[],
dilate=False):
#image params
self.img = img
self.block_threshold = block_threshold
# these are required for scaling boundaries while reading text.
self.horizontal_ratio = horizontal_ratio
self.vertical_ratio = vertical_ratio
# We would need the pdf path and page num to extract text from the pdf using pdf2txt
self.pdf_file_path = pdf_file_path
self.page_num = page_num
# post processors
self.post_processors = post_processors
self.dilate = dilate
@staticmethod
def check_text_for_continous_dashes(text):
'''
Check if the text in the block contains continuous dashes.
'''
for char, count in [[k, len(list(g))] for k, g in groupby(text)]:
if char == '-' and count > 2:
return True
return False
def get_text_from_pdf(self, point_x, point_y, width, height):
'''Extract text of a given block from the pdf
'''
cmd_ext = 'pdftotext'
cmd_page_params = ' -f {0} -l {0}'.format(self.page_num + 1)
cmd_tail = ' -x {0} -y {1} -W {2} -H {3} "{4}" -'.format(int(point_x),
int(point_y),
int(width),
int(height),
self.pdf_file_path)
command = cmd_ext + cmd_page_params + cmd_tail
return subprocess.check_output(command, shell=True)
def generate_text_data(self, row):
'''Generate Text features for a given block.
'''
point_x = (row['left'] * self.horizontal_ratio)
point_y = (row['top'] * self.vertical_ratio)
width = (row['width'] * self.horizontal_ratio) + 5
height = (row['height'] * self.vertical_ratio) + 5
text = self.get_text_from_pdf(point_x, point_y, width, height)
character_count = Counter(text)
if self.check_text_for_continous_dashes(text):
row['text'] = text.strip().replace('-', '').replace('\n', '')
elif character_count['.'] > 0 and character_count['.'] < 3:
row['text'] = text.strip().replace('-', '').replace('.', '').replace('\n', '')
else:
row['text'] = text.strip()
row['text_length'] = len(row['text'])
row['possible_row_merger'] = '\n' in row['text']
text_matched = re.findall(self.TEXT_REGEX, row['text'])
comma_sep_matcher = re.compile(self.COMMA_SEP_REGEX)
row['comma_separated_numbers_present'] = comma_sep_matcher.match(row['text']
.replace('\n', ' '))
row['is_text'] = len(text_matched) > 0
try:
row['number'] = int(row['text'].replace(',', ''))
except:
row['number'] = None
return row
def get_processed_blocks(self, block_stats):
'''
Apply the list of post processors passed.
'''
processed_block_stats = block_stats
for func in self.post_processors:
processed_block_stats = func(processed_block_stats)
return processed_block_stats
def generate(self):
'''Extract text based features from each block.
Returns:
A Dataframe with each row as block and text based features.
'''
block_stats = self.extract_block_stats()
# Check for blank page
if len(block_stats.index) > 3:
block_stats_with_text_data = block_stats.apply(self.generate_text_data, axis=1)
return self.get_processed_blocks(block_stats_with_text_data)
return block_stats
# Post processors for image block feature generator
def filter_unwanted_blocks(block_features):
'''Blank blocks and footers are not blocks that we need to process,
thus we remove them.
'''
# remove blank blocks
filtered_block_features = block_features[block_features.text_length != 0]
# remove footer
return filtered_block_features[filtered_block_features.top < (block_features.top.max() * .95)]
def separate_blocks(block_features):
'''Some of the generated blocks are joined together; we separate them
into multiple blocks.
'''
processed_blocks = pd.DataFrame()
for index, row in block_features.iterrows():
splitted_row = []
if row.possible_row_merger is True:
for index, value in enumerate(row.text.split('\n')):
new_row = {}
for col in row.index:
new_row[col] = row[col]
new_height = row.height // len(row.text.split('\n'))
new_row['height'] = new_height
new_row['top'] = row.top + (index * new_height)
new_row['bottom'] = new_row['top'] + new_height
new_row['text'] = value
new_row['possible_row_merger'] = False
splitted_row.append(new_row)
processed_blocks = processed_blocks.append(splitted_row)
else:
processed_blocks = processed_blocks.append(row)
return processed_blocks
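# Added example (hedged sketch): wiring the feature generator together with the
# post processors defined above; the paths, page number and ratios are illustrative.
def _example_block_features(img, pdf_path="report.pdf"):
    generator = BlockTextualFeatureGenerator(
        img, horizontal_ratio=1.0, vertical_ratio=1.0, page_num=0,
        pdf_file_path=pdf_path, block_threshold=(10, 20),
        post_processors=[filter_unwanted_blocks, separate_blocks], dilate=True)
    return generator.generate()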
|
mit
|
datapythonista/pandas
|
pandas/tests/resample/test_time_grouper.py
|
1
|
10921
|
from datetime import datetime
from operator import methodcaller
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
test_series = Series(np.random.randn(1000), index=date_range("1/1/2000", periods=1000))
def test_apply():
grouper = Grouper(freq="A", label="right", closed="right")
grouped = test_series.groupby(grouper)
def f(x):
return x.sort_values()[-3:]
applied = grouped.apply(f)
expected = test_series.groupby(lambda x: x.year).apply(f)
applied.index = applied.index.droplevel(0)
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(applied, expected)
def test_count():
test_series[::3] = np.nan
expected = test_series.groupby(lambda x: x.year).count()
grouper = Grouper(freq="A", label="right", closed="right")
result = test_series.groupby(grouper).count()
expected.index = result.index
tm.assert_series_equal(result, expected)
result = test_series.resample("A").count()
expected.index = result.index
tm.assert_series_equal(result, expected)
def test_numpy_reduction():
result = test_series.resample("A", closed="right").prod()
expected = test_series.groupby(lambda x: x.year).agg(np.prod)
expected.index = result.index
tm.assert_series_equal(result, expected)
def test_apply_iteration():
# #2300
N = 1000
ind = date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({"open": 1, "close": 2}, index=ind)
tg = Grouper(freq="M")
_, grouper, _ = tg._get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
def f(df):
return df["close"] / df["open"]
# it works!
result = grouped.apply(f)
tm.assert_index_equal(result.index, df.index)
@pytest.mark.parametrize(
"name, func",
[
("Int64Index", tm.makeIntIndex),
("Index", tm.makeUnicodeIndex),
("Float64Index", tm.makeFloatIndex),
("MultiIndex", lambda m: tm.makeCustomIndex(m, 2)),
],
)
def test_fails_on_no_datetime_index(name, func):
n = 2
index = func(n)
df = DataFrame({"a": np.random.randn(n)}, index=index)
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex "
f"or PeriodIndex, but got an instance of '{name}'"
)
with pytest.raises(TypeError, match=msg):
df.groupby(Grouper(freq="D"))
def test_aaa_group_order():
# GH 12840
# check TimeGrouper perform stable sorts
n = 20
data = np.random.randn(n, 4)
df = DataFrame(data, columns=["A", "B", "C", "D"])
df["key"] = [
datetime(2013, 1, 1),
datetime(2013, 1, 2),
datetime(2013, 1, 3),
datetime(2013, 1, 4),
datetime(2013, 1, 5),
] * 4
grouped = df.groupby(Grouper(key="key", freq="D"))
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)), df[::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)), df[1::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)), df[2::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)), df[3::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)), df[4::5])
def test_aggregate_normal(resample_method):
"""Check TimeGrouper's aggregation is identical as normal groupby."""
data = np.random.randn(20, 4)
normal_df = DataFrame(data, columns=["A", "B", "C", "D"])
normal_df["key"] = [1, 2, 3, 4, 5] * 4
dt_df = DataFrame(data, columns=["A", "B", "C", "D"])
dt_df["key"] = [
datetime(2013, 1, 1),
datetime(2013, 1, 2),
datetime(2013, 1, 3),
datetime(2013, 1, 4),
datetime(2013, 1, 5),
] * 4
normal_grouped = normal_df.groupby("key")
dt_grouped = dt_df.groupby(Grouper(key="key", freq="D"))
expected = getattr(normal_grouped, resample_method)()
dt_result = getattr(dt_grouped, resample_method)()
expected.index = date_range(start="2013-01-01", freq="D", periods=5, name="key")
tm.assert_equal(expected, dt_result)
# if TimeGrouper is used, 'nth' doesn't work yet
"""
for func in ['nth']:
expected = getattr(normal_grouped, func)(3)
expected.index = date_range(start='2013-01-01',
freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)(3)
tm.assert_frame_equal(expected, dt_result)
"""
@pytest.mark.parametrize(
"method, method_args, unit",
[
("sum", {}, 0),
("sum", {"min_count": 0}, 0),
("sum", {"min_count": 1}, np.nan),
("prod", {}, 1),
("prod", {"min_count": 0}, 1),
("prod", {"min_count": 1}, np.nan),
],
)
def test_resample_entirely_nat_window(method, method_args, unit):
s = Series([0] * 2 + [np.nan] * 2, index=date_range("2017", periods=4))
result = methodcaller(method, **method_args)(s.resample("2d"))
expected = Series(
[0.0, unit], index=pd.DatetimeIndex(["2017-01-01", "2017-01-03"], freq="2D")
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, fill_value",
[("min", np.nan), ("max", np.nan), ("sum", 0), ("prod", 1), ("count", 0)],
)
def test_aggregate_with_nat(func, fill_value):
# check TimeGrouper's aggregation is identical as normal groupby
# if NaT is included, 'var', 'std', 'mean', 'first','last'
# and 'nth' doesn't work yet
n = 20
data = np.random.randn(n, 4).astype("int64")
normal_df = DataFrame(data, columns=["A", "B", "C", "D"])
normal_df["key"] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=["A", "B", "C", "D"])
dt_df["key"] = [
datetime(2013, 1, 1),
datetime(2013, 1, 2),
pd.NaT,
datetime(2013, 1, 4),
datetime(2013, 1, 5),
] * 4
normal_grouped = normal_df.groupby("key")
dt_grouped = dt_df.groupby(Grouper(key="key", freq="D"))
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[fill_value] * 4], index=[3], columns=["A", "B", "C", "D"])
expected = normal_result.append(pad)
expected = expected.sort_index()
dti = date_range(start="2013-01-01", freq="D", periods=5, name="key")
expected.index = dti._with_freq(None) # TODO: is this desired?
tm.assert_frame_equal(expected, dt_result)
assert dt_result.index.name == "key"
def test_aggregate_with_nat_size():
# GH 9925
n = 20
data = np.random.randn(n, 4).astype("int64")
normal_df = DataFrame(data, columns=["A", "B", "C", "D"])
normal_df["key"] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=["A", "B", "C", "D"])
dt_df["key"] = [
datetime(2013, 1, 1),
datetime(2013, 1, 2),
pd.NaT,
datetime(2013, 1, 4),
datetime(2013, 1, 5),
] * 4
normal_grouped = normal_df.groupby("key")
dt_grouped = dt_df.groupby(Grouper(key="key", freq="D"))
normal_result = normal_grouped.size()
dt_result = dt_grouped.size()
pad = Series([0], index=[3])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(
start="2013-01-01", freq="D", periods=5, name="key"
)._with_freq(None)
tm.assert_series_equal(expected, dt_result)
assert dt_result.index.name == "key"
def test_repr():
# GH18203
result = repr(Grouper(key="A", freq="H"))
expected = (
"TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
"convention='e', origin='start_day')"
)
assert result == expected
result = repr(Grouper(key="A", freq="H", origin="2000-01-01"))
expected = (
"TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
"convention='e', origin=Timestamp('2000-01-01 00:00:00'))"
)
assert result == expected
@pytest.mark.parametrize(
"method, method_args, expected_values",
[
("sum", {}, [1, 0, 1]),
("sum", {"min_count": 0}, [1, 0, 1]),
("sum", {"min_count": 1}, [1, np.nan, 1]),
("sum", {"min_count": 2}, [np.nan, np.nan, np.nan]),
("prod", {}, [1, 1, 1]),
("prod", {"min_count": 0}, [1, 1, 1]),
("prod", {"min_count": 1}, [1, np.nan, 1]),
("prod", {"min_count": 2}, [np.nan, np.nan, np.nan]),
],
)
def test_upsample_sum(method, method_args, expected_values):
s = Series(1, index=date_range("2017", periods=2, freq="H"))
resampled = s.resample("30T")
index = pd.DatetimeIndex(
["2017-01-01T00:00:00", "2017-01-01T00:30:00", "2017-01-01T01:00:00"],
freq="30T",
)
result = methodcaller(method, **method_args)(resampled)
expected = Series(expected_values, index=index)
tm.assert_series_equal(result, expected)
def test_groupby_resample_interpolate():
# GH 35325
d = {"price": [10, 11, 9], "volume": [50, 60, 50]}
df = DataFrame(d)
df["week_starting"] = date_range("01/01/2018", periods=3, freq="W")
result = (
df.set_index("week_starting")
.groupby("volume")
.resample("1D")
.interpolate(method="linear")
)
expected_ind = pd.MultiIndex.from_tuples(
[
(50, "2018-01-07"),
(50, Timestamp("2018-01-08")),
(50, Timestamp("2018-01-09")),
(50, Timestamp("2018-01-10")),
(50, Timestamp("2018-01-11")),
(50, Timestamp("2018-01-12")),
(50, Timestamp("2018-01-13")),
(50, Timestamp("2018-01-14")),
(50, Timestamp("2018-01-15")),
(50, Timestamp("2018-01-16")),
(50, Timestamp("2018-01-17")),
(50, Timestamp("2018-01-18")),
(50, Timestamp("2018-01-19")),
(50, Timestamp("2018-01-20")),
(50, Timestamp("2018-01-21")),
(60, Timestamp("2018-01-14")),
],
names=["volume", "week_starting"],
)
expected = DataFrame(
data={
"price": [
10.0,
9.928571428571429,
9.857142857142858,
9.785714285714286,
9.714285714285714,
9.642857142857142,
9.571428571428571,
9.5,
9.428571428571429,
9.357142857142858,
9.285714285714286,
9.214285714285714,
9.142857142857142,
9.071428571428571,
9.0,
11.0,
],
"volume": [50.0] * 15 + [60],
},
index=expected_ind,
)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
sklearn/neighbors/tests/test_neighbors_tree.py
|
15
|
9427
|
# License: BSD 3 clause
import pickle
import itertools
import numpy as np
import pytest
from sklearn.neighbors import DistanceMetric
from sklearn.neighbors._ball_tree import (
BallTree, kernel_norm, DTYPE, ITYPE,
NeighborsHeap as NeighborsHeapBT,
simultaneous_sort as simultaneous_sort_bt,
nodeheap_sort as nodeheap_sort_bt)
from sklearn.neighbors._kd_tree import (
KDTree, NeighborsHeap as NeighborsHeapKDT,
simultaneous_sort as simultaneous_sort_kdt,
nodeheap_sort as nodeheap_sort_kdt)
from sklearn.utils import check_random_state
from numpy.testing import assert_array_almost_equal, assert_allclose
rng = np.random.RandomState(42)
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=rng.random_sample(DIMENSION)),
'wminkowski': dict(p=3, w=rng.random_sample(DIMENSION)),
'mahalanobis': dict(V=V_mahalanobis)}
KD_TREE_METRICS = ['euclidean', 'manhattan', 'chebyshev', 'minkowski']
BALL_TREE_METRICS = list(METRICS)
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
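# Note (added): dist_func above is the Minkowski p-distance; it is exercised later
# in test_pickle as a user-supplied callable metric for BallTree.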
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
@pytest.mark.parametrize("kernel", ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine'])
@pytest.mark.parametrize("h", [0.01, 0.1, 1])
@pytest.mark.parametrize("rtol", [0, 1E-5])
@pytest.mark.parametrize("atol", [1E-6, 1E-2])
@pytest.mark.parametrize("breadth_first", [True, False])
def test_kernel_density(Cls, kernel, h, rtol, atol, breadth_first,
n_samples=100, n_features=3):
rng = check_random_state(1)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
dens_true = compute_kernel_slow(Y, X, kernel, h)
tree = Cls(X, leaf_size=10)
dens = tree.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
def test_neighbor_tree_query_radius(Cls, n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
tree = Cls(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = tree.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
def test_neighbor_tree_query_radius_distance(Cls, n_samples=100,
n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
tree = Cls(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = tree.query_radius([query_pt], r + eps,
return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
@pytest.mark.parametrize('dualtree', (True, False))
def test_neighbor_tree_two_point(Cls, dualtree, n_samples=100, n_features=3):
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
r = np.linspace(0, 1, 10)
tree = Cls(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
counts = tree.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
@pytest.mark.parametrize('NeighborsHeap', [NeighborsHeapBT, NeighborsHeapKDT])
def test_neighbors_heap(NeighborsHeap, n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
rng = check_random_state(0)
for row in range(n_pts):
d_in = rng.random_sample(2 * n_nbrs).astype(DTYPE, copy=False)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
@pytest.mark.parametrize('nodeheap_sort', [nodeheap_sort_bt,
nodeheap_sort_kdt])
def test_node_heap(nodeheap_sort, n_nodes=50):
rng = check_random_state(0)
vals = rng.random_sample(n_nodes).astype(DTYPE, copy=False)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
@pytest.mark.parametrize('simultaneous_sort', [simultaneous_sort_bt,
simultaneous_sort_kdt])
def test_simultaneous_sort(simultaneous_sort, n_rows=10, n_pts=201):
rng = check_random_state(0)
dist = rng.random_sample((n_rows, n_pts)).astype(DTYPE, copy=False)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE, copy=False)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
@pytest.mark.parametrize('Cls', [KDTree, BallTree])
def test_gaussian_kde(Cls, n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
rng = check_random_state(0)
x_in = rng.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
tree = Cls(x_in[:, None])
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
dens_tree = tree.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_tree, dens_gkde, decimal=3)
@pytest.mark.parametrize(
'Cls, metric',
itertools.chain(
[(KDTree, metric) for metric in KD_TREE_METRICS],
[(BallTree, metric) for metric in BALL_TREE_METRICS]))
@pytest.mark.parametrize('k', (1, 3, 5))
@pytest.mark.parametrize('dualtree', (True, False))
@pytest.mark.parametrize('breadth_first', (True, False))
def test_nn_tree_query(Cls, metric, k, dualtree, breadth_first):
rng = check_random_state(0)
X = rng.random_sample((40, DIMENSION))
Y = rng.random_sample((10, DIMENSION))
kwargs = METRICS[metric]
kdt = Cls(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
@pytest.mark.parametrize(
"Cls, metric",
[(KDTree, 'euclidean'), (BallTree, 'euclidean'),
(BallTree, dist_func)])
@pytest.mark.parametrize('protocol', (0, 1, 2))
def test_pickle(Cls, metric, protocol):
rng = check_random_state(0)
X = rng.random_sample((10, 3))
if hasattr(metric, '__call__'):
kwargs = {'p': 2}
else:
kwargs = {}
tree1 = Cls(X, leaf_size=1, metric=metric, **kwargs)
ind1, dist1 = tree1.query(X)
s = pickle.dumps(tree1, protocol=protocol)
tree2 = pickle.loads(s)
ind2, dist2 = tree2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert isinstance(tree2, Cls)
|
bsd-3-clause
|
bokeh/bokeh
|
examples/reference/models/select_server.py
|
1
|
1213
|
## Bokeh server for Select
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, Select
from bokeh.plotting import figure
x=[3,4,6,12,10,1,5,6,3,8]
y=[7,1,3,4,1,6,10,4,10,3]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange','Red', 'Orange','Red', 'Orange',]
df=pd.DataFrame({'x':x,'y':y,'label':label})
source = ColumnDataSource(data=dict(x=df.x, y=df.y,label=df.label))
plot_figure = figure(title='Select',height=450, width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y', source=source, size=10,color='label')
select = Select(title="Filter plot by color:", value="All", options=["All", "Red", "Orange"])
def select_click(attr,old,new):
active_select=select.value ## Getting the select widget's current value
# filter the dataframe with value in select
if active_select!='All':
selected_df=df[df['label']==active_select]
else:
selected_df=df.copy()
source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)
select.on_change('value',select_click)
layout=row(select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Select Bokeh Server"
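## Added note (hedged): as a Bokeh server app this script is normally launched with
##   bokeh serve --show select_server.py
## rather than being executed directly with python.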
|
bsd-3-clause
|
skidzo/sympy
|
sympy/interactive/tests/test_ipythonprinting.py
|
24
|
6208
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u'\N{GREEK SMALL LETTER PI}', 'pi')
assert app.user_ns['a2']['text/plain'] in (u' 2\n\N{GREEK SMALL LETTER PI} ', ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u'\N{GREEK SMALL LETTER PI}', 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u' 2\n\N{GREEK SMALL LETTER PI} ', ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note: Python 2 unicode corresponds to Python 3 str. In Python 3 there is one
# text type (str, which holds Unicode data) and two byte types (bytes and bytearray).
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u'{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}',
"{n_i: 3, pi: 3.14}",
u'{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')
# If we enable the default printing, then the dictionary should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u'{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}',
"{n_i: 3, pi: 3.14}",
u'{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note: Python 2 unicode corresponds to Python 3 str. In Python 3 there is one
# text type (str, which holds Unicode data) and two byte types (bytes and bytearray).
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
# issue 9799
app.run_cell("from sympy import Piecewise, Symbol, Eq")
app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
|
bsd-3-clause
|
0x4C4A/SS-2014
|
LD3/ld3_script.py
|
1
|
3275
|
# -*- coding: utf-8 -*-
# Signals and Systems. Lab exercise 3
# == Effect of a rectangular window on the signal spectrum ==
import sys
import numpy as np
import matplotlib.pyplot as plt
from PyQt4 import QtGui, QtCore
from scipy.fftpack import fft
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
self.setWindowTitle('Singnala spektra atkariba no taisnstura loga platuma')
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# Make a slidebar
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.setFocusPolicy(QtCore.Qt.StrongFocus)
sld.setGeometry(30, 40, 200, 30)
sld.setMaximum(40)
sld.setMinimum(1)
sld.setTickInterval(1)
sld.setTickPosition(2)
sld.setValue(20)
sld.valueChanged[int].connect(self.changeValue)
# Make a Line Edit widget
self.qle = QtGui.QLineEdit(self)
self.qle.setReadOnly(1)
#self.qle.insert('Taisnstura loga platums:')
# set the layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(sld)
layout.addWidget(self.qle)
self.setLayout(layout)
def changeValue(self, value):
''' plot '''
# Time parameters
T = value/10.
sampRate = samples/T
x = np.linspace(0, T, samples)
# Windowed signal
y = np.sin(2*np.pi*x)+np.sin(2*np.pi*x*1.5)
# Discrete spectrum
S = fft(y)/samples
fs = np.arange(0, sampRate, 1/T)
# Continuous spectrum
fx0 = np.arange(-2, 10, 0.001)
S0 = 0.5*np.sinc(T*fx0)
# plot
sign = self.figure.add_subplot(211)
spectr = self.figure.add_subplot(212)
# Clear the previous plots
sign.hold(False)
spectr.hold(False)
# Draw the new ones
sign.plot(x, y, '.-k')
sign.legend(['Ierobezots signals'], 1)
spectr.stem(fs, abs(S), linefmt='k', markerfmt='.k'), spectr.hold(True)
spectr.plot(fx0+1, abs(S0), '-.b')
spectr.legend(['Signala spektrs'], 1)
spectr.axis([0., 5., 0, 0.8])#, sign.axis([0, 4., -1, 1])
spectr.grid(b = True, which='both', linewidth=2), sign.grid(b = True)
# Update the Line Edit widget with the window width
t = 'Taisnstura loga platums: {}xT'.format(T)
self.qle.setSelection(0, len(t))
self.qle.insert(t)
# Refresh the canvas
self.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
# Simulation time parameters
samples = 128
# GUI
main = Window()
main.changeValue(20)
main.show()
sys.exit(app.exec_())
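# Added note (hedged): this script targets PyQt4 and an older matplotlib release;
# newer matplotlib versions drop NavigationToolbar2QTAgg in favour of NavigationToolbar2QT.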
|
cc0-1.0
|
akionakamura/scikit-learn
|
sklearn/datasets/species_distributions.py
|
198
|
7923
|
"""
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
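# Added example (hedged sketch): building the longitude/latitude grid for the
# coverages of a fetched dataset (note that this downloads the data on first use).
def _example_species_grid():
    bunch = fetch_species_distributions()
    xgrid, ygrid = construct_grids(bunch)
    return xgrid, ygrid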
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
|
bsd-3-clause
|
davidharvey1986/pyRRG
|
trainStarGalClass/generateTrainingData.py
|
1
|
5658
|
'''
This program is going to generate the training data.
It needs a load of images at varying depth, of which the moments
have been measured.
From this I will generate a table of features and then a final column,
which will be galaxy (1) or star (0).
For now I will just use the SLACS images that I have downloaded,
but in the future I will require varying depth.
'''
import ipdb as pdb
import RRGtools as at
import glob as glob
import pyfits as fits
import numpy as np
import numpy.lib.recfunctions as rec
import pickle as pkl
import os as os
def generateTrainingData(allGalaxyFiles=None, \
trainingDataPklFile='DataTrained.pkl'):
'''
Generate a table of data using the data in the file trainingData
'''
if allGalaxyFiles is None:
allGalaxyFiles = glob.glob('trainingData/*uncor*')
if os.path.isfile( trainingDataPklFile ):
allTrainingData, allTrainingAnswers = \
pkl.load(open(trainingDataPklFile,'rb'), encoding='latin1')
else:
allTrainingData, allTrainingAnswers = \
filesToRecArray( allGalaxyFiles )
pkl.dump([allTrainingData, allTrainingAnswers], \
open(trainingDataPklFile,'wb'))
return allTrainingData, allTrainingAnswers
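# Added example (hedged sketch): feeding the generated features and answers to an
# off-the-shelf classifier; the choice of RandomForestClassifier is illustrative only.
def exampleTrainClassifier():
    from sklearn.ensemble import RandomForestClassifier
    features, answers = generateTrainingData()
    classifier = RandomForestClassifier(n_estimators=100)
    classifier.fit(features, answers)
    return classifier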
def filesToRecArray( files ):
'''
Take a list of fits files and append them together in
to one rec-array
They MUST have the same dtypes
'''
for i, iFile in enumerate(files):
data, iStarGalClass = matchStarGalaxiesToData( iFile )
if i==0:
allDataArray = rec2array( data )
iDataNoNan, starGalNoNan = \
removeNans( allDataArray, iStarGalClass )
starGalClass = starGalNoNan
allData = iDataNoNan
else:
iFileData = rec2array( data )
iDataNoNan, starGalNoNan = \
removeNans( iFileData, iStarGalClass )
allData = np.vstack( (allData, iDataNoNan))
starGalClass = np.append(starGalClass, starGalNoNan)
return allData, starGalClass
def generateTestData():
'''
This will pick one of the datasets
in trainingData to test on
'''
testGalaxyFiles = glob.glob('TestData/*uncor.cat')
featureLabels = getFeatureLabels( testGalaxyFiles[-1] )
testFeatures, testAnswers = \
generateTrainingData(allGalaxyFiles=testGalaxyFiles, \
trainingDataPklFile='testData.pkl')
return featureLabels, testFeatures, testAnswers
def rec2array( recArray):
'''
the sklearn classifier requires a normal array
of homogenous dtype, so i need to convert
'''
#dont include the errors in this fit
includeNames = [ i for i in list(recArray.columns.names) if not 'err' in i ]
#includeNames.remove('skymed')
#includeNames.remove('exp_time')
#includeNames.remove('skysw')
#includeNames.remove('skysd')
includeNames = \
['MAG_AUTO','gal_size','MU_MAX','MAG_ISO','RADIUS','FLUX_AUTO',\
'xxxx','yyyy','xyyy','xxyy','xx','xy','yy','e1','e2','prob',\
'ell','skymed','exp_time','skysd']
newArray = np.zeros((len(recArray),len(includeNames)), float)
for i, iField in enumerate(includeNames):
newArray[:,i] = recArray[iField]
return newArray
def getFeatureLabels( fitsFile ):
includeNames = fits.open(fitsFile)[1].data.columns.names
#remove all those with err in it
namesNoErr = [ i for i in includeNames if not 'err' in i ]
#includeNames.remove('skymed')
#includeNames.remove('exp_time')
#includeNames.remove('skysw')
#includeNames.remove('skysd')
print(namesNoErr)
#namesNoErr = \
# ['MAG_AUTO','gal_size','MU_MAX','MAG_ISO','RADIUS',\
# 'xxxx','yyyy','xyyy','xxyy','ell']
#,'skymed']
namesNoErr = \
['MAG_AUTO','gal_size','MU_MAX','MAG_ISO','RADIUS','FLUX_AUTO',\
'xxxx','yyyy','xyyy','xxyy','xx','xy','yy','e1','e2','prob',\
'ell','skymed','exp_time','skysd']
print(namesNoErr)
return np.array(namesNoErr)
def matchStarGalaxiesToData( iFile ):
'''
I need to add a column to 'data' that has the classification
of stars (0), galaxies(1) and neither (-1)
'''
cluster=iFile.split('_')[0]
data = fits.open(iFile)[1].data
classification = np.zeros(len(data))-1
newIDcol = \
[fits.Column(name='ID', format='D', array=np.arange(len(data)))]
dataCols = data.columns + fits.ColDefs(newIDcol)
dataWithID = fits.BinTableHDU.from_columns(dataCols)
dataWithID.writeto('DataID.fits', clobber=True)
matchedGalaxyData = at.run_match(cluster+'_galaxies.fits',\
'DataID.fits')[1].data
classification[matchedGalaxyData['ID'].astype(int)] = 1
matchedStarData = at.run_match(cluster+'_stars.fits',\
'DataID.fits')[1].data
classification[matchedStarData['ID'].astype(int)] = 0
return data, classification
def removeNans( newArray, starGal ):
#remove nan
#newArray[ np.isfinite(newArray) == False ] = -99
nanCheck = np.isfinite(np.sum(newArray, axis=1))
newArrayNansRemoved = newArray[nanCheck, :]
Nremoved = newArray.shape[0] - newArrayNansRemoved.shape[0]
print(("%i/%i removed due to nans" % (Nremoved, newArray.shape[0])))
nanCheckField = np.isfinite(np.sum(newArray, axis=0))
return newArrayNansRemoved, starGal[nanCheck]
|
mit
|
mjgrav2001/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
35
|
15016
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
druzhkov-paul/deep_forest
|
hierarchical.py
|
1
|
13967
|
from rf_embedding import (RandomTreesEmbeddingSupervised, RandomTreesEmbeddingUnsupervised,
GBTreesEmbeddingSupervised, XGBTEmbeddingSupervised)
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier
# from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.base import clone
import sys
import argparse
import numpy as np
import cv2
import timeit
import pickle
# from os.path import join
from ast import literal_eval
from utils import load_annotation, print_progress_bar, stop_progress_bar
class ArgsFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(formatter_class=ArgsFormatter,
description="")
parser.add_argument("--annotation_train", required=True, help="")
parser.add_argument("--annotation_test", required=True, help="")
parser.add_argument("--patch_size", default=16, type=int, help="")
parser.add_argument("--patch_stride", default=4, type=int, help="")
parser.add_argument("--patches_per_image", default=100, help="")
parser.add_argument("--ntrees", default=10, type=int, help="")
parser.add_argument("--depth", default=5, type=int, help="")
parser.add_argument("--levels", default=2, type=int, help="")
parser.add_argument("--out", default="/tmp", help="")
parser.add_argument("--fe", default="rf", help="")
parser.add_argument("--classifier", default="gbt", help="")
parser.add_argument("--onehot", action="store_true", help="")
parser.add_argument("--njobs", default=8, type=int, help="")
parser.add_argument("--init", default="patches", help="patches or hogs or load")
parser.add_argument("--image_size", default="32,32", help="initial patch size")
args = parser.parse_args()
print(" ".join(sys.argv))
print("reading train annotation...")
annotation_train = load_annotation(args.annotation_train)
print("reading test annotation...")
annotation_test = load_annotation(args.annotation_test)
def extract_patches_2d(image, patch_size, patch_stride=1,
max_patches=None, random_state=None, reshape=True):
from sklearn.utils.validation import check_array, check_random_state
from sklearn.feature_extraction.image import extract_patches, _compute_n_patches
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
stride = patch_stride
if type(patch_stride) is not int:
assert len(stride) == 2
stride = np.ones(len(image.shape), dtype=int)
stride[:2] = patch_stride
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=stride)
if reshape:
extracted_patches = extracted_patches.reshape(extracted_patches.shape[0], extracted_patches.shape[1], -1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(extracted_patches.shape[0], size=n_patches)
j_s = rng.randint(extracted_patches.shape[1], size=n_patches)
patches = extracted_patches[i_s, j_s, :]
else:
patches = extracted_patches
return patches
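# Illustrative sketch (defined but never called here): the stride-aware extractor above
# returns a spatial grid of flattened patches. For a hypothetical 32x32 RGB image with
# 16x16 patches and a stride of 4 there are ((32 - 16) // 4 + 1) = 5 positions per axis.
def _example_patch_grid():
    image = np.zeros((32, 32, 3), dtype=np.uint8)
    patches = extract_patches_2d(image, (16, 16), patch_stride=(4, 4))
    return patches.shape  # expected: (5, 5, 16 * 16 * 3) == (5, 5, 768)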
def transform_feature_map(feature_maps, feature_extractor, patch_size, patch_stride):
for i, fm in enumerate(feature_maps):
patches = extract_patches_2d(fm, patch_size, patch_stride)
dim = patches.shape
patches = patches.reshape(dim[0] * dim[1], -1)
feature_maps[i] = feature_extractor.transform(patches).reshape(dim[0], dim[1], -1)
return feature_maps
def draw_random_samples(feature_maps, labels, patch_size, patch_stride, patches_per_feature_map):
X = []
Y = []
for image, label in zip(feature_maps, labels):
if type(patches_per_feature_map) == list:
patches_num = patches_per_feature_map[label]
else:
patches_num = patches_per_feature_map
patches = extract_patches_2d(image, patch_size, patch_stride, max_patches=patches_num)
X.append(patches)
Y += [label for _ in patches]
X = np.concatenate(X, axis=0)
Y = np.array(Y)
return X, Y
def classify_image(fm, label, receptive_size, stride, classifier):
patches = extract_patches_2d(fm, receptive_size, stride)
patches = patches.reshape(patches.shape[0] * patches.shape[1], -1)
predictions = classifier.predict(patches)
unique, counts = np.unique(predictions, return_counts=True)
label_predicted = unique[np.argmax(counts)]
return label, label_predicted
def evaluate(feature_maps, labels, receptive_size, stride, classifier, dump_file=None):
from joblib import Parallel, delayed, cpu_count
from sklearn.metrics import confusion_matrix
start_time = timeit.default_timer()
# njobs = cpu_count()
d = Parallel(n_jobs=1)(delayed(classify_image)(fm, label, receptive_size, stride, classifier)
for fm, label in zip(feature_maps, labels))
if dump_file is not None:
        with open(dump_file, "w") as f:
for x in zip(*d):
f.write(",".join(map(str, x)) + "\n")
cm = confusion_matrix(*zip(*d))
accuracy = float(cm.diagonal().sum()) / cm.sum()
print("evaluation", timeit.default_timer() - start_time)
return accuracy, cm
def patches_extractor(image):
stride = (args.patch_stride, args.patch_stride)
patches = extract_patches_2d(image, [args.patch_size, args.patch_size], patch_stride=stride)
return patches
def hog_extractor(image):
win_stride = (args.patch_stride, args.patch_stride)
win_size = (args.patch_size, args.patch_size)
hog_descriptor = cv2.HOGDescriptor(_winSize=win_size, _blockSize=(8, 8), _blockStride=(4, 4),
_cellSize=(4, 4), _nbins=9)
features = hog_descriptor.compute(image, winStride=win_stride)
new_shape = ((image.shape[0] - win_size[0]) / win_stride[0] + 1,
(image.shape[1] - win_size[1]) / win_stride[1] + 1,
hog_descriptor.getDescriptorSize())
features = features.reshape(new_shape)
return features
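# Rough shape sketch for the defaults assumed in this script (patch_size=16, patch_stride=4,
# image_size=32x32): the HOG window slides over a ((32 - 16) / 4 + 1) x ((32 - 16) / 4 + 1)
# = 5x5 grid, and with 8x8 blocks, 4x4 cells and 9 bins each window descriptor holds
# 9 block positions * 4 cells * 9 bins = 324 values, so hog_extractor yields a (5, 5, 324) map.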
# TODO. Add low level CNN features?
if args.init == "patches":
initial_feature_extractor = patches_extractor
elif args.init == "hogs":
initial_feature_extractor = hog_extractor
elif args.init != "load":
print("Error. Unknown initial representation '{}'.".format(args.init))
exit(0)
def get_initial_feature_representation(annotation):
images = []
labels = []
image_size = literal_eval(args.image_size)
start_time = timeit.default_timer()
for a in annotation:
image_path = a["image"]
image = cv2.imread(image_path)
if image is None:
continue
image = cv2.resize(image, image_size, image)
feature_map = initial_feature_extractor(image)
images.append(feature_map)
labels.append(a["label"])
print("initial", timeit.default_timer() - start_time)
return images, labels
print("obtaining initial feature representation...")
if args.init == "load":
    images_train = pickle.load(open("images_train.pkl", "rb"))
    labels_train = pickle.load(open("labels_train.pkl", "rb"))
    images_test = pickle.load(open("images_test.pkl", "rb"))
    labels_test = pickle.load(open("labels_test.pkl", "rb"))
else:
images_train, labels_train = get_initial_feature_representation(annotation_train)
    pickle.dump(images_train, open("images_train_" + args.init + ".pkl", "wb"))
    pickle.dump(labels_train, open("labels_train_" + args.init + ".pkl", "wb"))
    images_test, labels_test = get_initial_feature_representation(annotation_test)
    pickle.dump(images_test, open("images_test_" + args.init + ".pkl", "wb"))
    pickle.dump(labels_test, open("labels_test_" + args.init + ".pkl", "wb"))
base_classifier_ = None
if base_classifier_ is None:
if args.classifier == "rf":
base_classifier = RandomForestClassifier(n_estimators=1000, max_depth=15, n_jobs=args.njobs)
elif args.classifier == "gbt":
base_classifier = XGBClassifier(n_estimators=1500, max_depth=10, learning_rate=0.01)
elif args.classifier == "linsvm":
base_classifier = LinearSVC()
else:
print("Unknown classifier {}.".format(args.classifier))
exit(0)
else:
base_classifier = clone(base_classifier_)
print("=" * 80)
print("test: {} levels, {} trees, {} depth".format(args.levels, args.ntrees, args.depth))
assert args.levels >= 0
feature_extractors = []
subpatch_size = [2, 2]
subpatch_stride = [1, 1]
patches_per_image = map(int, args.patches_per_image.split(','))
if len(patches_per_image) == 1:
patches_per_image = patches_per_image[0]
X = []
Y = []
for level in range(args.levels):
print("extracting patches for training...")
X, Y = draw_random_samples(images_train, labels_train, subpatch_size,
subpatch_stride, patches_per_image)
    print(X.shape, Y.shape, np.mean(Y))
print("training classifier...")
start_time = timeit.default_timer()
classifier = clone(base_classifier)
classifier.fit(X, Y)
print("train score", classifier.score(X, Y))
print("training", timeit.default_timer() - start_time)
print("evaluating classifier...")
accuracy_train, cm_train = evaluate(images_train, labels_train, subpatch_size, subpatch_stride, classifier)
print("Train accuracy: {:.2%}".format(accuracy_train))
print(cm_train)
print(cm_train.astype('float') / cm_train.sum(axis=1)[:, np.newaxis])
accuracy_test, cm_test = evaluate(images_test, labels_test,
subpatch_size, subpatch_stride, classifier,
dump_file="test_predictions_{}.csv".format(level))
print("Test accuracy: {:.2%}".format(accuracy_test))
print(cm_test)
print(cm_test.astype('float') / cm_test.sum(axis=1)[:, np.newaxis])
print("\tgoing deeper to level {}...".format(level))
print("training feature representation...")
if args.fe == "rfu":
feature_extractor = RandomTreesEmbeddingUnsupervised(n_estimators=args.ntrees,
max_depth=args.depth,
use_one_hot=args.onehot,
n_jobs=args.njobs)
feature_extractor.fit(X)
feature_extractors.append(feature_extractor)
elif args.fe == "rfs":
feature_extractor = RandomTreesEmbeddingSupervised(n_estimators=args.ntrees,
max_depth=args.depth,
use_one_hot=args.onehot,
n_jobs=args.njobs)
feature_extractor.fit_transform(X, Y)
feature_extractors.append(feature_extractor)
elif args.fe == "gbt":
feature_extractor = GBTreesEmbeddingSupervised(n_estimators=args.ntrees,
max_depth=args.depth,
use_one_hot=args.onehot,
n_jobs=args.njobs)
feature_extractor.fit_transform(X, Y)
feature_extractors.append(feature_extractor)
elif args.fe == "xgbt":
# For multiclass problems number of trees will be K times bigger.
feature_extractor = XGBTEmbeddingSupervised(n_estimators=args.ntrees,
max_depth=args.depth,
use_one_hot=args.onehot,
silent=True)
feature_extractor.fit_transform(X, Y)
feature_extractors.append(feature_extractor)
else:
print("Error. Unknown feature extractor '{}'.".format(args.fe))
exit(0)
print("transforming train samples...")
images_train = transform_feature_map(images_train, feature_extractor, subpatch_size, subpatch_stride)
print("transforming test samples...")
images_test = transform_feature_map(images_test, feature_extractor, subpatch_size, subpatch_stride)
print("extracting patches for training...")
X, Y = draw_random_samples(images_train, labels_train, subpatch_size, subpatch_stride, patches_per_image)
print(X.shape, Y.shape)
print("training final classifier...")
classifier = clone(base_classifier)
classifier.fit(X, Y)
print("evaluating classifier...")
accuracy_train, cm_train = evaluate(images_train, labels_train, subpatch_size, subpatch_stride, classifier)
print("Train accuracy: {:.2%}".format(accuracy_train))
print(cm_train)
print(cm_train.astype('float') / cm_train.sum(axis=1)[:, np.newaxis])
accuracy_test, cm_test = evaluate(images_test, labels_test, subpatch_size,
subpatch_stride, classifier,
dump_file="test_predictions_{}.csv".format(args.levels))
print("Test accuracy: {:.2%}".format(accuracy_test))
print(cm_test)
print(cm_test.astype('float') / cm_test.sum(axis=1)[:, np.newaxis])
# ntrees = np.fromstring(args.ntrees, sep=",", dtype=int)
# depths = np.fromstring(args.depth, sep=",", dtype=int)
# for trees in ntrees:
# for depth in depths:
# test(args.levels, ntrees=trees, depth=depth,
# base_classifier_=RandomForestClassifier(n_estimators=100, max_depth=5))
|
gpl-3.0
|
tswast/google-cloud-python
|
translate/docs/conf.py
|
2
|
11927
|
# -*- coding: utf-8 -*-
#
# google-cloud-translate documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-translate"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-translate-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-translate.tex",
u"google-cloud-translate Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-translate",
u"google-cloud-translate Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-translate",
u"google-cloud-translate Documentation",
author,
"google-cloud-translate",
"GAPIC library for the {metadata.shortName} v3beta1 service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
apache-2.0
|
xubenben/scikit-learn
|
examples/applications/topics_extraction_with_nmf_lda.py
|
133
|
3517
|
"""
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation to a corpus of documents in order to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
bsd-3-clause
|
cmusatyalab/openface
|
demos/web/websocket-server.py
|
2
|
12805
|
#!/usr/bin/env python2
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, "..", ".."))
import txaio
txaio.use_twisted()
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from twisted.internet import task, defer
from twisted.internet.ssl import DefaultOpenSSLContextFactory
from twisted.python import log
import argparse
import cv2
import imagehash
import json
from PIL import Image
import numpy as np
import os
import StringIO
import urllib
import base64
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import TSNE
from sklearn.svm import SVC
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import openface
modelDir = os.path.join(fileDir, '..', '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
# For TLS connections
tls_crt = os.path.join(fileDir, 'tls', 'server.crt')
tls_key = os.path.join(fileDir, 'tls', 'server.key')
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--unknown', type=bool, default=False,
help='Try to predict unknown people')
parser.add_argument('--port', type=int, default=9000,
help='WebSocket Port')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
class Face:
def __init__(self, rep, identity):
self.rep = rep
self.identity = identity
def __repr__(self):
return "{{id: {}, rep[0:5]: {}}}".format(
str(self.identity),
self.rep[0:5]
)
class OpenFaceServerProtocol(WebSocketServerProtocol):
def __init__(self):
super(OpenFaceServerProtocol, self).__init__()
self.images = {}
self.training = True
self.people = []
self.svm = None
if args.unknown:
self.unknownImgs = np.load("./examples/web/unknown.npy")
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
self.training = True
def onOpen(self):
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
raw = payload.decode('utf8')
msg = json.loads(raw)
print("Received {} message of length {}.".format(
msg['type'], len(raw)))
if msg['type'] == "ALL_STATE":
self.loadState(msg['images'], msg['training'], msg['people'])
elif msg['type'] == "NULL":
self.sendMessage('{"type": "NULL"}')
elif msg['type'] == "FRAME":
self.processFrame(msg['dataURL'], msg['identity'])
self.sendMessage('{"type": "PROCESSED"}')
elif msg['type'] == "TRAINING":
self.training = msg['val']
if not self.training:
self.trainSVM()
elif msg['type'] == "ADD_PERSON":
self.people.append(msg['val'].encode('ascii', 'ignore'))
print(self.people)
elif msg['type'] == "UPDATE_IDENTITY":
h = msg['hash'].encode('ascii', 'ignore')
if h in self.images:
self.images[h].identity = msg['idx']
if not self.training:
self.trainSVM()
else:
print("Image not found.")
elif msg['type'] == "REMOVE_IMAGE":
h = msg['hash'].encode('ascii', 'ignore')
if h in self.images:
del self.images[h]
if not self.training:
self.trainSVM()
else:
print("Image not found.")
elif msg['type'] == 'REQ_TSNE':
self.sendTSNE(msg['people'])
else:
print("Warning: Unknown message type: {}".format(msg['type']))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
def loadState(self, jsImages, training, jsPeople):
self.training = training
for jsImage in jsImages:
h = jsImage['hash'].encode('ascii', 'ignore')
self.images[h] = Face(np.array(jsImage['representation']),
jsImage['identity'])
for jsPerson in jsPeople:
self.people.append(jsPerson.encode('ascii', 'ignore'))
if not training:
self.trainSVM()
def getData(self):
X = []
y = []
for img in self.images.values():
X.append(img.rep)
y.append(img.identity)
numIdentities = len(set(y + [-1])) - 1
if numIdentities == 0:
return None
if args.unknown:
numUnknown = y.count(-1)
numIdentified = len(y) - numUnknown
numUnknownAdd = (numIdentified / numIdentities) - numUnknown
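            # Example (illustrative numbers): with 30 identified images across 3 people and
            # 4 unlabeled ones, numUnknownAdd = 30 / 3 - 4 = 6, so six stock "unknown"
            # embeddings are appended below to roughly balance the classes.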
if numUnknownAdd > 0:
print("+ Augmenting with {} unknown images.".format(numUnknownAdd))
for rep in self.unknownImgs[:numUnknownAdd]:
# print(rep)
X.append(rep)
y.append(-1)
X = np.vstack(X)
y = np.array(y)
return (X, y)
def sendTSNE(self, people):
d = self.getData()
if d is None:
return
else:
(X, y) = d
        X_pca = PCA(n_components=50).fit_transform(X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)
yVals = list(np.unique(y))
colors = cm.rainbow(np.linspace(0, 1, len(yVals)))
# print(yVals)
plt.figure()
for c, i in zip(colors, yVals):
name = "Unknown" if i == -1 else people[i]
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=name)
plt.legend()
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png')
imgdata.seek(0)
content = 'data:image/png;base64,' + \
urllib.quote(base64.b64encode(imgdata.buf))
msg = {
"type": "TSNE_DATA",
"content": content
}
self.sendMessage(json.dumps(msg))
def trainSVM(self):
print("+ Training SVM on {} labeled images.".format(len(self.images)))
d = self.getData()
if d is None:
self.svm = None
return
else:
(X, y) = d
        # y is a numpy array here; count the distinct labels (including -1 for "unknown").
        numIdentities = len(np.unique(y))
if numIdentities <= 1:
return
param_grid = [
{'C': [1, 10, 100, 1000],
'kernel': ['linear']},
{'C': [1, 10, 100, 1000],
'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
self.svm = GridSearchCV(SVC(C=1), param_grid, cv=5).fit(X, y)
def processFrame(self, dataURL, identity):
head = "data:image/jpeg;base64,"
assert(dataURL.startswith(head))
imgdata = base64.b64decode(dataURL[len(head):])
imgF = StringIO.StringIO()
imgF.write(imgdata)
imgF.seek(0)
img = Image.open(imgF)
buf = np.fliplr(np.asarray(img))
rgbFrame = np.zeros((300, 400, 3), dtype=np.uint8)
rgbFrame[:, :, 0] = buf[:, :, 2]
rgbFrame[:, :, 1] = buf[:, :, 1]
rgbFrame[:, :, 2] = buf[:, :, 0]
if not self.training:
annotatedFrame = np.copy(buf)
# cv2.imshow('frame', rgbFrame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# return
identities = []
# bbs = align.getAllFaceBoundingBoxes(rgbFrame)
bb = align.getLargestFaceBoundingBox(rgbFrame)
bbs = [bb] if bb is not None else []
for bb in bbs:
# print(len(bbs))
landmarks = align.findLandmarks(rgbFrame, bb)
alignedFace = align.align(args.imgDim, rgbFrame, bb,
landmarks=landmarks,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
continue
phash = str(imagehash.phash(Image.fromarray(alignedFace)))
if phash in self.images:
identity = self.images[phash].identity
else:
rep = net.forward(alignedFace)
# print(rep)
if self.training:
self.images[phash] = Face(rep, identity)
# TODO: Transferring as a string is suboptimal.
# content = [str(x) for x in cv2.resize(alignedFace, (0,0),
# fx=0.5, fy=0.5).flatten()]
content = [str(x) for x in alignedFace.flatten()]
msg = {
"type": "NEW_IMAGE",
"hash": phash,
"content": content,
"identity": identity,
"representation": rep.tolist()
}
self.sendMessage(json.dumps(msg))
else:
if len(self.people) == 0:
identity = -1
elif len(self.people) == 1:
identity = 0
elif self.svm:
identity = self.svm.predict(rep)[0]
else:
print("hhh")
identity = -1
if identity not in identities:
identities.append(identity)
if not self.training:
bl = (bb.left(), bb.bottom())
tr = (bb.right(), bb.top())
cv2.rectangle(annotatedFrame, bl, tr, color=(153, 255, 204),
thickness=3)
for p in openface.AlignDlib.OUTER_EYES_AND_NOSE:
cv2.circle(annotatedFrame, center=landmarks[p], radius=3,
color=(102, 204, 255), thickness=-1)
if identity == -1:
if len(self.people) == 1:
name = self.people[0]
else:
name = "Unknown"
else:
name = self.people[identity]
cv2.putText(annotatedFrame, name, (bb.left(), bb.top() - 10),
cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
color=(152, 255, 204), thickness=2)
if not self.training:
msg = {
"type": "IDENTITIES",
"identities": identities
}
self.sendMessage(json.dumps(msg))
plt.figure()
plt.imshow(annotatedFrame)
plt.xticks([])
plt.yticks([])
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png')
imgdata.seek(0)
content = 'data:image/png;base64,' + \
urllib.quote(base64.b64encode(imgdata.buf))
msg = {
"type": "ANNOTATED",
"content": content
}
plt.close()
self.sendMessage(json.dumps(msg))
def main(reactor):
log.startLogging(sys.stdout)
factory = WebSocketServerFactory()
factory.protocol = OpenFaceServerProtocol
ctx_factory = DefaultOpenSSLContextFactory(tls_key, tls_crt)
reactor.listenSSL(args.port, factory, ctx_factory)
return defer.Deferred()
if __name__ == '__main__':
task.react(main)
|
apache-2.0
|
tharunkalwa/pympler
|
pympler/classtracker_stats.py
|
7
|
27011
|
"""
Provide saving, loading and presenting gathered `ClassTracker` statistics.
"""
import os
import sys
from pympler.util.compat import pickle
from copy import deepcopy
from pympler.util.stringutils import trunc, pp, pp_timestamp
from pympler.asizeof import Asized
__all__ = ["Stats", "ConsoleStats", "HtmlStats"]
def _merge_asized(base, other, level=0):
"""
Merge **Asized** instances `base` and `other` into `base`.
"""
ref2key = lambda ref: ref.name.split(':')[0]
base.size += other.size
base.flat += other.flat
if level > 0:
base.name = ref2key(base)
# Add refs from other to base. Any new refs are appended.
base.refs = list(base.refs) # we may need to append items
refs = {}
for ref in base.refs:
refs[ref2key(ref)] = ref
for ref in other.refs:
key = ref2key(ref)
if key in refs:
_merge_asized(refs[key], ref, level=level+1)
else:
# Don't modify existing Asized instances => deepcopy
base.refs.append(deepcopy(ref))
base.refs[-1].name = key
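# Minimal sketch (illustrative only, unused by this module): merging two Asized results for
# the same object accumulates total and flat sizes and unifies referents by the part of
# their name before the first ':'. Assumes pympler's Asized(size, flat, refs=(), name=None).
def _merge_asized_example():
    base = Asized(100, 40, refs=[Asized(60, 60, name='__dict__: dict')])
    other = Asized(120, 50, refs=[Asized(70, 70, name='__dict__: dict')])
    _merge_asized(base, other)
    return base.size, base.flat, base.refs[0].size  # expected: (220, 90, 130)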
def _merge_objects(tref, merged, obj):
"""
Merge the snapshot size information of multiple tracked objects. The
tracked object `obj` is scanned for size information at time `tref`.
The sizes are merged into **Asized** instance `merged`.
"""
size = None
for (timestamp, tsize) in obj.snapshots:
if timestamp == tref:
size = tsize
if size:
_merge_asized(merged, size)
def _format_trace(trace):
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for fname, lineno, func, src, _ in trace:
if src:
for line in src:
lines.append(' '+line.strip()+'\n')
lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines)
class Stats(object):
"""
Presents the memory statistics gathered by a `ClassTracker` based on user
preferences.
"""
def __init__(self, tracker=None, filename=None, stream=None):
"""
Initialize the data log structures either from a `ClassTracker` instance
(argument `tracker`) or a previously dumped file (argument `filename`).
:param tracker: ClassTracker instance
:param filename: filename of previously dumped statistics
:param stream: where to print statistics, defaults to ``sys.stdout``
"""
if stream:
self.stream = stream
else:
self.stream = sys.stdout
self.tracker = tracker
if tracker:
self.index = tracker.index
self.snapshots = tracker.snapshots
else:
self.index = None
self.snapshots = None
self.sorted = []
if filename:
self.load_stats(filename)
def load_stats(self, fdump):
"""
Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access.
"""
if isinstance(fdump, type('')):
fdump = open(fdump, 'rb')
self.index = pickle.load(fdump)
self.snapshots = pickle.load(fdump)
self.sorted = []
def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
The argument `file` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close()
def _init_sort(self):
"""
Prepare the data to be sorted.
If not yet sorted, import all tracked objects from the tracked index.
Extend the tracking information by implicit information to make
sorting easier (DSU pattern).
"""
if not self.sorted:
# Identify the snapshot that tracked the largest amount of memory.
tmax = None
maxsize = 0
for snapshot in self.snapshots:
                if snapshot.tracked_total > maxsize:
                    tmax = snapshot.timestamp
                    maxsize = snapshot.tracked_total
for key in list(self.index.keys()):
for tobj in self.index[key]:
tobj.classname = key
tobj.size = tobj.get_max_size()
tobj.tsize = tobj.get_size_at_time(tmax)
self.sorted.extend(self.index[key])
def sort_stats(self, *args):
"""
Sort the tracked objects according to the supplied criteria. The
argument is a string identifying the basis of a sort (example: 'size'
or 'classname'). When more than one key is provided, then additional
keys are used as secondary criteria when there is equality in all keys
selected before them. For example, ``sort_stats('name', 'size')`` will
sort all the entries according to their class name, and resolve all
ties (identical class names) by sorting by size. The criteria are
fields in the tracked object instances. Results are stored in the
``self.sorted`` list which is used by ``Stats.print_stats()`` and other
methods. The fields available for sorting are:
'classname'
the name with which the class was registered
'name'
the classname
'birth'
creation timestamp
'death'
destruction timestamp
'size'
the maximum measured size of the object
'tsize'
the measured size during the largest snapshot
'repr'
string representation of the object
Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches
are in ascending order (alphabetical).
The function returns self to allow calling functions on the result::
stats.sort_stats('size').reverse_order().print_stats()
"""
criteria = ('classname', 'tsize', 'birth', 'death',
'name', 'repr', 'size')
if not set(criteria).issuperset(set(args)):
raise ValueError("Invalid sort criteria")
if not args:
args = criteria
def args_to_tuple(obj):
keys = []
for attr in args:
attribute = getattr(obj, attr)
if attr in ('tsize', 'size'):
attribute = -attribute
keys.append(attribute)
return tuple(keys)
self._init_sort()
self.sorted.sort(key=args_to_tuple)
return self
def reverse_order(self):
"""
Reverse the order of the tracked instance index `self.sorted`.
"""
self._init_sort()
self.sorted.reverse()
return self
def annotate(self):
"""
Annotate all snapshots with class-based summaries.
"""
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
def annotate_snapshot(self, snapshot):
"""
Store additional statistical data in snapshot.
"""
if hasattr(snapshot, 'classes'):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError: # pragma: no cover
pct = 0
try:
avg = total / active
except ZeroDivisionError:
avg = 0
snapshot.classes[classname] = dict(sum=total,
avg=avg,
pct=pct,
active=active)
snapshot.classes[classname]['merged'] = merged
@property
def tracked_classes(self):
"""Return a list of all tracked classes occurring in any snapshot."""
return sorted(list(self.index.keys()))
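# Illustrative sketch (hypothetical dump path, not used by this module): the typical
# dump/load round trip for the Stats class defined above.
def _stats_round_trip_example(tracker, path='classtracker.pdump'):
    stats = Stats(tracker=tracker)
    stats.dump_stats(path)
    # Reloading yields an equivalent Stats object that no longer needs the tracker.
    return Stats(filename=path)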
class ConsoleStats(Stats):
"""
Presentation layer for `Stats` to be used in text-based consoles.
"""
def _print_refs(self, refs, total, prefix=' ',
level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
trunc(prefix+str(ref.name), 50),
pp(ref.size),
int(ref.size*100.0/total),
level
))
self._print_refs(ref.refs, total, prefix=prefix+' ',
level=level+1)
def print_object(self, tobj):
"""
Print the gathered information of object `tobj` in human-readable format.
"""
if tobj.death:
self.stream.write('%-32s ( free ) %-35s\n' % (
trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
self.stream.write('%-32s 0x%08x %-35s\n' % (
trunc(tobj.name, 32, left=1),
tobj.id,
trunc(tobj.repr, 35)
))
if tobj.trace:
self.stream.write(_format_trace(tobj.trace))
for (timestamp, size) in tobj.snapshots:
self.stream.write(' %-30s %s\n' % (
pp_timestamp(timestamp), pp(size.size)
))
self._print_refs(size.refs, size.size)
if tobj.death is not None:
self.stream.write(' %-30s finalize\n' % (
pp_timestamp(tobj.death),
))
def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contain the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contain the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj)
def print_summary(self):
"""
Print per-class summary for each snapshot.
"""
# Emit class summaries for each snapshot
classlist = self.tracked_classes
fobj = self.stream
fobj.write('---- SUMMARY '+'-'*66+'\n')
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
fobj.write('%-35s %11s %12s %12s %5s\n' % (
trunc(snapshot.desc, 35),
'active',
pp(snapshot.asizeof_total),
'average',
'pct'
))
for classname in classlist:
info = snapshot.classes.get(classname)
fobj.write(' %-33s %11d %12s %12s %4d%%\n' % (
trunc(classname, 33),
info['active'],
pp(info['sum']),
pp(info['avg']),
info['pct']
))
fobj.write('-'*79+'\n')
class HtmlStats(Stats):
"""
Output the `ClassTracker` statistics as HTML pages and graphs.
"""
style = """<style type="text/css">
table { width:100%; border:1px solid #000; border-spacing:0px; }
td, th { border:0px; }
div { width:200px; padding:10px; background-color:#FFEECC; }
#nb { border:0px; }
#tl { margin-top:5mm; margin-bottom:5mm; }
#p1 { padding-left: 5px; }
#p2 { padding-left: 50px; }
#p3 { padding-left: 100px; }
#p4 { padding-left: 150px; }
#p5 { padding-left: 200px; }
#p6 { padding-left: 210px; }
#p7 { padding-left: 220px; }
#hl { background-color:#FFFFCC; }
#r1 { background-color:#BBBBBB; }
#r2 { background-color:#CCCCCC; }
#r3 { background-color:#DDDDDD; }
#r4 { background-color:#EEEEEE; }
#r5,#r6,#r7 { background-color:#FFFFFF; }
#num { text-align:right; }
</style>
"""
nopylab_msg = """<div color="#FFCCCC">Could not generate %s chart!
Install <a href="http://matplotlib.sourceforge.net/">Matplotlib</a>
to generate charts.</div>\n"""
chart_tag = '<img src="%s">\n'
header = "<html><head><title>%s</title>%s</head><body>\n"
tableheader = '<table border="1">\n'
tablefooter = '</table>\n'
footer = '</body></html>\n'
refrow = """<tr id="r%(level)d">
<td id="p%(level)d">%(name)s</td>
<td id="num">%(size)s</td>
<td id="num">%(pct)3.1f%%</td></tr>"""
def _print_refs(self, fobj, refs, total, level=1, minsize=0, minpct=0.1):
"""
Print individual referents recursively.
"""
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
if level == 1:
fobj.write('<table>\n')
for ref in lrefs:
if ref.size > minsize and (ref.size*100.0/total) > minpct:
data = dict(level=level,
name=trunc(str(ref.name), 128),
size=pp(ref.size),
pct=ref.size*100.0/total)
fobj.write(self.refrow % data)
self._print_refs(fobj, ref.refs, total, level=level+1)
if level == 1:
fobj.write("</table>\n")
class_summary = """<p>%(cnt)d instances of %(cls)s were registered. The
average size is %(avg)s, the minimal size is %(min)s, the maximum size is
%(max)s.</p>\n"""
class_snapshot = '''<h3>Snapshot: %(name)s, %(total)s occupied by instances of
class %(cls)s</h3>\n'''
def print_class_details(self, fname, classname):
"""
Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`.
"""
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))
fobj.write("<h1>%s</h1>\n" % (classname))
sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)
fobj.write(self.charts[classname])
fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
if classname in snapshot.classes:
merged = snapshot.classes[classname]['merged']
fobj.write(self.class_snapshot % {
'name': snapshot.desc, 'cls':classname, 'total': pp(merged.size)
})
if merged.refs:
self._print_refs(fobj, merged.refs, merged.size)
else:
fobj.write('<p>No per-referent sizes recorded.</p>\n')
fobj.write("<h2>Instances</h2>\n")
for tobj in self.index[classname]:
fobj.write('<table id="tl" width="100%" rules="rows">\n')
fobj.write('<tr><td id="hl" width="140px">Instance</td><td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
if tobj.repr:
fobj.write("<tr><td>Representation</td><td>%s </td></tr>\n" % tobj.repr)
fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" % (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
if tobj.trace:
trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
for (timestamp, size) in tobj.snapshots:
fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
if not size.refs:
fobj.write("<td>%s</td></tr>\n" % pp(size.size))
else:
fobj.write("<td>%s" % pp(size.size))
self._print_refs(fobj, size.refs, size.size)
fobj.write("</td></tr>\n")
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
snapshot_cls_header = """<tr>
<th id="hl">Class</th>
<th id="hl" align="right">Instance #</th>
<th id="hl" align="right">Total</th>
<th id="hl" align="right">Average size</th>
<th id="hl" align="right">Share</th></tr>\n"""
snapshot_cls = """<tr>
<td>%(cls)s</td>
<td align="right">%(active)d</td>
<td align="right">%(sum)s</td>
<td align="right">%(avg)s</td>
<td align="right">%(pct)3.2f%%</td></tr>\n"""
snapshot_summary = """<p>Total virtual memory assigned to the program at that time
was %(sys)s, which includes %(overhead)s profiling overhead. The
ClassTracker tracked %(tracked)s in total. The measurable objects
including code objects but excluding overhead have a total size of
%(asizeof)s.</p>\n"""
def relative_path(self, filepath, basepath=None):
"""
Convert `filepath` into a path relative to `basepath`. By default,
`basepath` is self.basedir.
"""
if basepath is None:
basepath = self.basedir
if not basepath:
return filepath
if filepath.startswith(basepath):
rel = filepath[len(basepath):]
if rel and rel[0] == os.sep:
rel = rel[1:]
return rel
def create_title_page(self, filename, title=''):
"""
Output the title page.
"""
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
fobj.write(self.snapshot_cls_header)
for classname in classlist:
data = snapshot.classes[classname].copy()
data['cls'] = '<a href="%s">%s</a>' % (self.relative_path(self.links[classname]), classname)
data['sum'] = pp(data['sum'])
data['avg'] = pp(data['avg'])
fobj.write(self.snapshot_cls % data)
fobj.write('</table>')
fobj.write('</td><td>\n')
if snapshot.tracked_total:
fobj.write(self.charts[snapshot])
fobj.write('</td></tr>\n')
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
def create_lifetime_chart(self, classname, filename=''):
"""
Create chart that depicts the lifetime of the instance registered with
`classname`. The output is written to `filename`.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
return self.nopylab_msg % (classname + " lifetime")
cnt = []
for tobj in self.index[classname]:
cnt.append([tobj.birth, 1])
if tobj.death:
cnt.append([tobj.death, -1])
cnt.sort()
for i in range(1, len(cnt)):
cnt[i][1] += cnt[i-1][1]
#if cnt[i][0] == cnt[i-1][0]:
# del cnt[i-1]
x = [t for [t,c] in cnt]
y = [c for [t,c] in cnt]
figure()
xlabel("Execution time [s]")
ylabel("Instance #")
title("%s instances" % classname)
plot(x, y, 'o')
savefig(filename)
return self.chart_tag % (os.path.basename(filename))
def create_snapshot_chart(self, filename=''):
"""
Create chart that depicts the memory allocation over time apportioned to
the tracked classes.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
import matplotlib.mlab as mlab
except ImportError:
return self.nopylab_msg % ("memory allocation")
classlist = self.tracked_classes
times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
if max(pct) > 3.0:
sz = [float(fp.classes[cn]['sum'])/(1024*1024) for fp in self.snapshots]
sz = [sx+sy for sx, sy in zip(base, sz)]
xp, yp = mlab.poly_between(times, base, sz)
polys.append( ((xp, yp), {'label': cn}) )
poly_labels.append(cn)
base = sz
figure()
title("Snapshot Memory")
xlabel("Execution Time [s]")
ylabel("Virtual Memory [MiB]")
sizes = [float(fp.asizeof_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'r--', label='Total')
sizes = [float(fp.tracked_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'b--', label='Tracked total')
for (args, kwds) in polys:
fill(*args, **kwds)
legend(loc=2)
savefig(filename)
return self.chart_tag % (self.relative_path(filename))
def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
if v['pct'] > 3.0:
classlist.append(k)
sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
figure(figsize=(8,8))
axes([0.1, 0.1, 0.8, 0.8])
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)
return self.chart_tag % (self.relative_path(filename))
def create_html(self, fname, title="ClassTracker Statistics"):
"""
Create HTML page `fname` and additional files in a directory derived
from `fname`.
"""
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
# Annotate all snapshots in advance
self.annotate()
# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows returning alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)
for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
self.charts[fp] = self.create_pie_chart(fp, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'-lt.png')
self.charts[cn] = self.create_lifetime_chart(cn, fn)
# Create HTML pages first for each class and then the index page.
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'.html')
self.links[cn] = fn
self.print_class_details(fn, cn)
self.create_title_page(fname, title=title)
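# A minimal usage sketch of HtmlStats, assuming the standard pympler layout in
# which ClassTracker is importable from pympler.classtracker and this module
# provides the HtmlStats class defined above. The tracked Document class and
# the file names are illustrative only.
if __name__ == '__main__':
    from pympler.classtracker import ClassTracker

    class Document(object):
        def __init__(self, text):
            self.text = text

    tracker = ClassTracker()
    tracker.track_class(Document)

    docs = [Document('x' * n) for n in range(1000)]
    tracker.create_snapshot('after allocation')
    del docs
    tracker.create_snapshot('after release')

    # create_html() writes the index page plus per-class pages and charts
    # into a '<basename>_files' directory next to the index page.
    HtmlStats(tracker=tracker).create_html('memory_profile.html')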
|
apache-2.0
|
nikitasingh981/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
6
|
30178
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_samples + n_features + 1] / 2`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets of the
data, then pool them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above allocation is too big. Fall back to something much smaller
# (and less optimal): shrink the number of retained candidates before
# re-allocating the array.
n_best_tot = 10
n_best_sub = 2
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful when working with data whose mean is approximately, but not
exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_samples + n_features + 1] / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
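# A minimal usage sketch of the estimators defined above on synthetic
# contaminated data. The data-generation details (inlier covariance, outlier
# range, seeds) are arbitrary and chosen purely for illustration; fast_mcd and
# MinCovDet are the function and class implemented in this module.
if __name__ == '__main__':
    rng = np.random.RandomState(42)

    # 100 correlated Gaussian inliers plus 10 gross outliers far from the bulk.
    X_inliers = rng.multivariate_normal(mean=[0.0, 0.0],
                                        cov=[[1.0, 0.7], [0.7, 1.0]],
                                        size=100)
    X_outliers = rng.uniform(low=5.0, high=10.0, size=(10, 2))
    X = np.vstack([X_inliers, X_outliers])

    # Raw FastMCD estimates (no consistency correction or reweighting).
    raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
        X, random_state=rng)
    print("raw robust location:", raw_location)

    # Full MinCovDet estimator with correction and reweighting applied in fit().
    mcd = MinCovDet(random_state=0).fit(X)
    print("reweighted location:", mcd.location_)
    print("reweighted covariance:\n", mcd.covariance_)
    # support_ flags the observations kept for the reweighted estimate; the
    # injected outliers should mostly be excluded.
    print("samples kept in the support: %d / %d"
          % (mcd.support_.sum(), X.shape[0]))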
|
bsd-3-clause
|
Stonelinks/jsbsim
|
tests/TestInitialConditions.py
|
2
|
15187
|
# TestInitialConditions.py
#
# A regression test that checks that the ICs are correctly read from the IC
# file and then loaded into the ic/ properties. It also checks that the
# correct ICs are reported in the data written to CSV files.
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import os, math
import xml.etree.ElementTree as et
import pandas as pd
from JSBSim_utils import CreateFDM, append_xml, ExecuteUntil, JSBSimTestCase, RunTest
# Values copied from FGJSBBase.cpp and FGXMLElement.cpp
convtoft = {'FT': 1.0, 'M': 3.2808399, 'IN': 1.0/12.0}
convtofps = {'FT/SEC': 1.0, 'KTS': 1.68781}
convtodeg = {'DEG': 1.0, 'RAD': 57.295779513082320876798154814105}
convtokts = {'KTS': 1.0, 'FT/SEC': 1.0/1.68781}
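# A small sketch of how the conversion tables above are meant to be used: an
# IC tag's value is multiplied by the factor looked up from its 'unit'
# attribute (falling back to the tag's default unit), which mirrors what
# CheckICValues() below does. The XML snippet is made up for illustration.
def _demo_convert_ic_tag():
    import xml.etree.ElementTree as et
    tag = et.fromstring('<latitude unit="RAD">0.7853981633974483</latitude>')
    factor = convtodeg[tag.attrib.get('unit', 'RAD')]
    return float(tag.text) * factor  # ~45.0 degrees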
class TestInitialConditions(JSBSimTestCase):
def getElementTrees(self, s):
# Read the IC file name from the script
tree = et.parse(s)
use_tag = tree.getroot().find('use')
aircraft_name = use_tag.attrib['aircraft']
aircraft_path = os.path.join('aircraft', aircraft_name)
path_to_jsbsim_aircrafts = self.sandbox.path_to_jsbsim_file(aircraft_path)
IC_file = append_xml(use_tag.attrib['initialize'])
IC_tree = et.parse(os.path.join(path_to_jsbsim_aircrafts, IC_file))
return (tree, IC_tree)
def test_initial_conditions_v1(self):
prop_output_to_CSV = ['velocities/vc-kts']
# A dictionary that contains the XML tags to extract from the IC file
# along with the name of the properties that contain the values
# extracted from the IC file.
vars = [{'tag': 'vt', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/vt-fps', 'prop': 'velocities/vt-fps',
'CSV_header': 'V_{Total} (ft/s)'},
{'tag': 'vc', 'unit': convtokts, 'default_unit': 'KTS',
'ic_prop': 'ic/vc-kts', 'prop': 'velocities/vc-kts',
'CSV_header': '/fdm/jsbsim/velocities/vc-kts'},
{'tag': 'ubody', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/u-fps', 'prop': 'velocities/u-fps',
'CSV_header': 'UBody'},
{'tag': 'vbody', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/v-fps', 'prop': 'velocities/v-fps',
'CSV_header': 'VBody'},
{'tag': 'wbody', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/w-fps', 'prop': 'velocities/w-fps',
'CSV_header': 'WBody'},
{'tag': 'vnorth', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/vn-fps', 'prop': 'velocities/v-north-fps',
'CSV_header': 'V_{North} (ft/s)'},
{'tag': 'veast', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/ve-fps', 'prop': 'velocities/v-east-fps',
'CSV_header': 'V_{East} (ft/s)'},
{'tag': 'vdown', 'unit': convtofps, 'default_unit': 'FT/SEC',
'ic_prop': 'ic/vd-fps', 'prop': 'velocities/v-down-fps',
'CSV_header': 'V_{Down} (ft/s)'},
{'tag': 'latitude', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/lat-gc-deg', 'prop': 'position/lat-gc-deg',
'CSV_header': 'Latitude (deg)'},
{'tag': 'longitude', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/long-gc-deg', 'prop': 'position/long-gc-deg',
'CSV_header': 'Longitude (deg)'},
{'tag': 'altitude', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/h-agl-ft', 'prop': 'position/h-agl-ft',
'CSV_header': 'Altitude AGL (ft)'},
{'tag': 'altitudeAGL', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/h-agl-ft', 'prop': 'position/h-agl-ft',
'CSV_header': 'Altitude AGL (ft)'},
{'tag': 'altitudeMSL', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/h-sl-ft', 'prop': 'position/h-sl-ft',
'CSV_header': 'Altitude ASL (ft)'},
{'tag': 'phi', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/phi-deg', 'prop': 'attitude/phi-deg',
'CSV_header': 'Phi (deg)'},
{'tag': 'theta', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/theta-deg', 'prop': 'attitude/theta-deg',
'CSV_header': 'Theta (deg)'},
{'tag': 'psi', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/psi-true-deg', 'prop': 'attitude/psi-deg',
'CSV_header': 'Psi (deg)'},
{'tag': 'elevation', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/terrain-elevation-ft',
'prop': 'position/terrain-elevation-asl-ft',
'CSV_header': 'Terrain Elevation (ft)'}]
for s in self.script_list(('ZLT-NT-moored-1.xml',
'737_cruise_steady_turn_simplex.xml')):
(tree, IC_tree) = self.getElementTrees(s)
IC_root = IC_tree.getroot()
# Only testing version 1.0 of init files
if 'version' in IC_root.attrib and float(IC_root.attrib['version']) != 1.0:
continue
f, fdm = self.LoadScript(tree, s, prop_output_to_CSV)
self.CheckICValues(vars, 'script %s' % (f,), fdm, IC_root)
del fdm
def LoadScript(self, tree, script_path, prop_output_to_CSV=[]):
# Generate a CSV file to check that it is correctly initialized
# with the initial values
output_tag = et.SubElement(tree.getroot(), 'output')
output_tag.attrib['name'] = 'check_csv_values.csv'
output_tag.attrib['type'] = 'CSV'
output_tag.attrib['rate'] = '10'
position_tag = et.SubElement(output_tag, 'position')
position_tag.text = 'ON'
velocities_tag = et.SubElement(output_tag, 'velocities')
velocities_tag.text = 'ON'
for props in prop_output_to_CSV:
property_tag = et.SubElement(output_tag, 'property')
property_tag.text = props
f = os.path.split(script_path)[-1] # Script name
tree.write(f)
# Initialize the script
fdm = CreateFDM(self.sandbox)
fdm.load_script(f)
fdm.run_ic()
return (f, fdm)
def CheckICValues(self, vars, f, fdm, IC_root):
# Extract the IC values from XML
for var in vars:
var_tag = IC_root.find(var['tag'])
var['specified'] = var_tag is not None
if var_tag is None:
var['value'] = 0.0
continue
var['value'] = float(var_tag.text)
if 'unit' in var_tag.attrib:
conv = var['unit'][var_tag.attrib['unit']]
else:
conv = var['unit'][var['default_unit']]
var['value'] *= conv
# Sanity check: we just initialized JSBSim with the ICs, so the time must
# still be 0.0
self.assertEqual(fdm['simulation/sim-time-sec'], 0.0)
# Check that the properties (including in 'ic/') have been correctly
# initialized (i.e. that they contain the value read from the XML
# file).
for var in vars:
if not var['specified']:
continue
value = var['value']
prop = fdm[var['ic_prop']]
if var['tag'] == 'psi':
if abs(prop - 360.0) <= 1E-8:
prop = 0.0
self.assertAlmostEqual(value, prop, delta=1E-7,
msg="In %s: %s should be %f but found %f" % (f, var['tag'], value, prop))
prop = fdm[var['prop']]
if var['tag'] == 'psi':
if abs(prop - 360.0) <= 1E-8:
prop = 0.0
self.assertAlmostEqual(value, prop, delta=1E-7,
msg="In %s: %s should be %f but found %f" % (f, var['tag'], value, prop))
# Execute the first second of the script. This is to make sure that the
# CSV file is open and the ICs have been written to it.
try:
ExecuteUntil(fdm, 1.0)
except RuntimeError as e:
if e.args[0] == 'Trim Failed':
self.fail("Trim failed in %s" % (f,))
else:
raise
# Sanity check: make sure that the time step 0.0 has been copied in the
# CSV file.
ref = pd.read_csv('check_csv_values.csv')
self.assertEqual(ref['Time'][0], 0.0)
# Check that the value in the CSV file equals the value read from the
# IC file.
for var in vars:
if not var['specified']:
continue
value = var['value']
csv_value = ref[var['CSV_header']][0]
if var['tag'] == 'psi':
if abs(csv_value - 360.0) <= 1E-8:
csv_value = 0.0
self.assertAlmostEqual(value, csv_value, delta=1E-7,
msg="In %s: %s should be %f but found %f" % (f, var['tag'], value, csv_value))
def GetVariables(self, lat_tag):
vars = [{'tag': 'longitude', 'unit': convtodeg, 'default_unit': 'RAD',
'ic_prop': 'ic/long-gc-deg', 'prop': 'position/long-gc-deg',
'CSV_header': 'Longitude (deg)'},
{'tag': 'altitudeAGL', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/h-agl-ft', 'prop': 'position/h-agl-ft',
'CSV_header': 'Altitude AGL (ft)'},
{'tag': 'altitudeMSL', 'unit': convtoft, 'default_unit': 'FT',
'ic_prop': 'ic/h-sl-ft', 'prop': 'position/h-sl-ft',
'CSV_header': 'Altitude ASL (ft)'}]
if lat_tag is None:
lat_vars = []
elif 'type' not in lat_tag.attrib or lat_tag.attrib['type'][:4] != "geod":
lat_vars = [{'tag': 'latitude', 'unit': convtodeg,
'default_unit': 'RAD', 'ic_prop': 'ic/lat-gc-deg',
'prop': 'position/lat-gc-deg',
'CSV_header': 'Latitude (deg)'}]
else:
lat_vars = [{'tag': 'latitude', 'unit': convtodeg,
'default_unit': 'RAD', 'ic_prop': 'ic/lat-geod-deg',
'prop': 'position/lat-geod-deg',
'CSV_header': 'Latitude Geodetic (deg)'}]
return lat_vars+vars
def test_geod_position_from_init_file_v2(self):
for s in self.script_list(('ZLT-NT-moored-1.xml',
'737_cruise_steady_turn_simplex.xml')):
(tree, IC_tree) = self.getElementTrees(s)
IC_root = IC_tree.getroot()
# Only testing version 2.0 of init files
if ('version' not in IC_root.attrib or float(IC_root.attrib['version']) != 2.0):
continue
position_tag = IC_root.find('position')
lat_tag = position_tag.find('latitude')
f, fdm = self.LoadScript(tree, s)
self.CheckICValues(self.GetVariables(lat_tag), 'script %s' % (f,),
fdm, position_tag)
del fdm
def test_initial_latitude(self):
Output_file = self.sandbox.path_to_jsbsim_file('tests', 'output.xml')
GEODETIC, ELEVATION, ALTITUDE = (1, 2, 4)
for v in ('', '_v2'):
IC_file = self.sandbox.path_to_jsbsim_file('aircraft', 'ball',
'reset00'+v+'.xml')
for i in xrange(8):
for latitude_pos in xrange(4):
IC_tree = et.parse(IC_file)
IC_root = IC_tree.getroot()
if v:
position_tag = IC_root.find('position')
latitude_tag = et.SubElement(position_tag, 'latitude')
latitude_tag.attrib['unit'] = 'DEG'
else:
position_tag = IC_root
latitude_tag = IC_root.find('latitude')
latitude_tag.text = str(latitude_pos*30.)
if i & GEODETIC:
latitude_tag.attrib['type'] = 'geod'
if i & ELEVATION:
elevation_tag = et.SubElement(IC_root, 'elevation')
elevation_tag.text = '1000.'
if i & ALTITUDE:
if v:
altitude_tag = position_tag.find('altitudeMSL')
altitude_tag.tag = 'altitudeAGL'
else:
altitude_tag = position_tag.find('altitude')
altitude_tag.tag = 'altitudeMSL'
IC_tree.write('IC.xml')
fdm = CreateFDM(self.sandbox)
fdm.load_model('ball')
fdm.set_output_directive(Output_file)
fdm.set_output_filename(1, 'check_csv_values.csv')
fdm.load_ic('IC.xml', False)
fdm.run_ic()
self.CheckICValues(self.GetVariables(latitude_tag),
'IC%d' % (i,), fdm, position_tag)
del fdm
def test_set_initial_geodetic_latitude(self):
script_path = self.sandbox.path_to_jsbsim_file('scripts',
'737_cruise.xml')
output_file = self.sandbox.path_to_jsbsim_file('tests', 'output.xml')
fdm = CreateFDM(self.sandbox)
fdm.load_script(script_path)
fdm.set_output_directive(output_file)
alt = fdm['ic/h-sl-ft']
glat = fdm['ic/lat-geod-deg'] - 30.
fdm['ic/lat-geod-deg'] = glat
fdm.run_ic()
self.assertAlmostEqual(fdm['ic/h-sl-ft'], alt)
self.assertAlmostEqual(fdm['ic/lat-geod-deg'], glat)
self.assertAlmostEqual(fdm['ic/lat-geod-rad'], glat*math.pi/180.)
self.assertAlmostEqual(fdm['position/lat-geod-deg'], glat)
# Sanity check: make sure that the time step 0.0 has been copied in the
# CSV file.
ref = pd.read_csv('output.csv')
self.assertEqual(ref['Time'][0], 0.0)
self.assertAlmostEqual(ref['Latitude Geodetic (deg)'][0], glat)
RunTest(TestInitialConditions)
|
lgpl-2.1
|
eco32i/ggplot
|
ggplot/utils/utils.py
|
13
|
5903
|
"""Helper methods for ggplot.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import json
import os
import sys
import six
# API-docs from ggplot2: GPL-2 licensed
def ggsave(filename = None, plot = None, device = None, format = None,
path = None, scale = 1, width = None, height = None, units = "in",
dpi = 300, limitsize=True, **kwargs):
"""Save a ggplot with sensible defaults
ggsave is a convenient function for saving a plot. It defaults to
saving the last plot that you displayed, and for a default size uses
the size of the current graphics device. It also guesses the type of
graphics device from the extension. This means the only argument you
need to supply is the filename.
Parameters
----------
filename : str or file
file name or file to write the plot to
plot : ggplot
plot to save, defaults to last plot displayed
format : str
image format to use, automatically extract from
file name extension
path : str
path to save plot to (if you just want to set path and
not filename)
scale : number
scaling factor
width : number
width (defaults to the width of current plotting window)
height : number
height (defaults to the height of current plotting window)
units : str
units for width and height when either one is explicitly
specified (in, cm, or mm)
dpi : number
dpi to use for raster graphics
limitsize : bool
when `True` (the default), ggsave will not save images
larger than 25x25 inches (matching the check below), to prevent the common error
of specifying dimensions in pixels.
kwargs : dict
additional arguments to pass to matplotlib `savefig()`
Returns
-------
None
Examples
--------
>>> from ggplot import *
>>> gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
>>> ggsave("filename.png", gg)
Notes
-----
Incompatibilities to ggplot2:
- `format` can be used as an alternative to `device`
- ggsave will happily save matplotlib plots, if that was the last plot
"""
fig_kwargs = {}
fig_kwargs.update(kwargs)
# This is the case when we just use "ggsave(plot)"
if hasattr(filename, "draw"):
plot, filename = filename, plot
if plot is None:
figure = plt.gcf()
else:
if hasattr(plot, "draw"):
figure = plot.draw()
else:
raise Exception("plot is not a ggplot object")
if format and device:
raise Exception("Both 'format' and 'device' given: only use one")
# in the end, the image format ends up in `format`
if device:
format = device
if format:
if not format in figure.canvas.get_supported_filetypes():
raise Exception("Unknown format: {0}".format(format))
fig_kwargs["format"] = format
if filename is None:
if plot:
# ggplot2 defaults to pdf
filename = str(plot.__hash__()) + "." +(format if format else "pdf")
else:
# ggplot2 has a way to get to the last plot, but we currently don't
raise Exception("No filename given: please supply a filename")
if not isinstance(filename, six.string_types):
# so probably a file object
if format is None:
raise Exception("filename is not a string and no format given: please supply a format!")
if path:
filename = os.path.join(path, filename)
if units not in ["in", "cm", "mm"]:
raise Exception("units not 'in', 'cm', or 'mm'")
to_inch = {"in":lambda x:x,"cm":lambda x: x / 2.54, "mm":lambda x: x * 2.54 * 10}
from_inch = {"in":lambda x:x,"cm":lambda x: x * 2.54, "mm":lambda x: x * 2.54 * 10}
w, h = figure.get_size_inches()
issue_size = False
if width is None:
width = w
issue_size = True
else:
width = to_inch[units](width)
if height is None:
height = h
issue_size = True
else:
height = to_inch[units](height)
try:
scale = float(scale)
except (TypeError, ValueError):
raise Exception("Can't convert scale argument to a number: {0}".format(scale))
# ggplot2: if you specify a width *and* a scale, you get the width*scale image!
width = width * scale
height = height * scale
if issue_size:
msg = "Saving {0} x {1} {2} image.\n".format(from_inch[units](width), from_inch[units](height), units)
sys.stderr.write(msg)
if limitsize and (width > 25 or height > 25):
msg = "Dimensions exceed 25 inches (height and width are specified in inches/cm/mm, not pixels)." + \
" If you are sure you want these dimensions, use 'limitsize=False'."
raise Exception(msg)
fig_kwargs["dpi"] = dpi
#savefig(fname, dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False, bbox_inches=None, pad_inches=0.1,
# frameon=None)
try:
figure.set_size_inches(width,height)
figure.savefig(filename, **fig_kwargs)
finally:
# restore the sizes
figure.set_size_inches(w,h)
# close figure, if it was drawn by ggsave
if not plot is None:
plt.close(figure)
def add_ggplotrc_params(obj):
# ggplotrc defaults
if "HOME" in os.environ:
ggrc = os.path.join(os.environ["HOME"], ".ggplotrc")
try:
klass = obj.__class__.__name__
ggrc = json.load(open(ggrc, 'r'))
if klass in ggrc:
for k, v in ggrc[klass].items():
setattr(obj, k, v)
except:
pass
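# A minimal usage sketch: as noted in the docstring, ggsave() also accepts
# plain matplotlib figures, since with plot=None it falls back to plt.gcf().
# The file name and dimensions below are arbitrary.
if __name__ == '__main__':
    plt.plot([0, 1, 2, 3], [0, 1, 4, 9])
    # Save the current matplotlib figure as a 12 x 8 cm PNG at 150 dpi.
    ggsave("ggsave_demo.png", width=12, height=8, units="cm", dpi=150)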
|
bsd-2-clause
|
tensorflow/model-analysis
|
tensorflow_model_analysis/api/model_eval_lib_test.py
|
1
|
68195
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for using the model_eval_lib API."""
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import pandas as pd
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.api import model_eval_lib
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.eval_saved_model.example_trainers import csv_linear_classifier
from tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator
from tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier
from tensorflow_model_analysis.eval_saved_model.example_trainers import linear_regressor
from tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import legacy_query_based_metrics_evaluator
from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from tensorflow_model_analysis.evaluators.query_metrics import ndcg as legacy_ndcg
from tensorflow_model_analysis.evaluators.query_metrics import query_statistics
from tensorflow_model_analysis.extractors import legacy_feature_extractor
from tensorflow_model_analysis.extractors import legacy_predict_extractor
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.metrics import calibration_plot
from tensorflow_model_analysis.metrics import metric_specs
from tensorflow_model_analysis.metrics import ndcg
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib
from tensorflow_model_analysis.view import view_types
from tensorflowjs.converters import converter as tfjs_converter
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
try:
import tensorflow_ranking as tfr # pylint: disable=g-import-not-at-top
_TFR_IMPORTED = True
except (ImportError, tf.errors.NotFoundError):
_TFR_IMPORTED = False
_TEST_SEED = 982735
_TF_MAJOR_VERSION = int(tf.version.VERSION.split('.')[0])
class EvaluateTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
def setUp(self):
super(EvaluateTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
def _getTempDir(self):
return tempfile.mkdtemp()
def _exportEvalSavedModel(self, classifier):
temp_eval_export_dir = os.path.join(self._getTempDir(), 'eval_export_dir')
_, eval_export_dir = classifier(None, temp_eval_export_dir)
return eval_export_dir
def _writeTFExamplesToTFRecords(self, examples):
data_location = os.path.join(self._getTempDir(), 'input_data.rio')
with tf.io.TFRecordWriter(data_location) as writer:
for example in examples:
writer.write(example.SerializeToString())
return data_location
def _writeCSVToTextFile(self, examples):
data_location = os.path.join(self._getTempDir(), 'input_data.csv')
with open(data_location, 'w') as writer:
for example in examples:
writer.write(example + '\n')
return data_location
def assertMetricsAlmostEqual(self,
got_slicing_metrics,
expected_slicing_metrics,
output_name='',
subkey=''):
if got_slicing_metrics:
for (s, m) in got_slicing_metrics:
metrics = m[output_name][subkey]
self.assertIn(s, expected_slicing_metrics)
for metric_name in expected_slicing_metrics[s]:
self.assertIn(metric_name, metrics)
self.assertDictElementsAlmostEqual(
metrics[metric_name], expected_slicing_metrics[s][metric_name])
else:
# Only pass if expected_slicing_metrics also evaluates to False.
self.assertFalse(
expected_slicing_metrics, msg='Actual slicing_metrics was empty.')
def assertSliceMetricsEqual(self, expected_metrics, got_metrics):
self.assertCountEqual(
list(expected_metrics.keys()),
list(got_metrics.keys()),
msg='keys do not match. expected_metrics: %s, got_metrics: %s' %
(expected_metrics, got_metrics))
for key in expected_metrics.keys():
self.assertProtoEquals(
expected_metrics[key],
got_metrics[key],
msg='value for key %s does not match' % key)
def assertSliceListEqual(self, expected_list, got_list, value_assert_fn):
self.assertEqual(
len(expected_list),
len(got_list),
msg='expected_list: %s, got_list: %s' % (expected_list, got_list))
for index, (expected, got) in enumerate(zip(expected_list, got_list)):
(expected_key, expected_value) = expected
(got_key, got_value) = got
self.assertEqual(
expected_key, got_key, msg='key mismatch at index %d' % index)
value_assert_fn(expected_value, got_value)
def assertSlicePlotsListEqual(self, expected_list, got_list):
self.assertSliceListEqual(expected_list, got_list, self.assertProtoEquals)
def assertSliceMetricsListEqual(self, expected_list, got_list):
self.assertSliceListEqual(expected_list, got_list,
self.assertSliceMetricsEqual)
def testNoConstructFn(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [self._makeExample(age=3.0, language='english', label=1.0)]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_config = config.EvalConfig()
# Without a construct_fn, the evaluation should fail when Beam attempts
# to call the (missing) construct_fn.
eval_shared_model = types.EvalSharedModel(model_path=model_location)
with self.assertRaisesRegex(AttributeError,
'\'NoneType\' object has no attribute'):
model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir())
# Using the default_eval_shared_model should pass as it has a construct_fn.
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location)
model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir())
def testMixedEvalAndNonEvalSignatures(self):
examples = [self._makeExample(age=3.0, language='english', label=1.0)]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_config = config.EvalConfig(model_specs=[
config.ModelSpec(name='model1'),
config.ModelSpec(name='model2', signature_name='eval')
])
eval_shared_models = [
model_eval_lib.default_eval_shared_model(
model_name='model1',
eval_saved_model_path='/model1/path',
eval_config=eval_config),
model_eval_lib.default_eval_shared_model(
model_name='model2',
eval_saved_model_path='/model2/path',
eval_config=eval_config),
]
with self.assertRaisesRegex(
NotImplementedError,
'support for mixing eval and non-eval estimator models is not '
'implemented'):
model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_models,
data_location=data_location,
output_path=self._getTempDir())
@parameterized.named_parameters(('tflite', constants.TF_LITE),
('tfjs', constants.TF_JS))
def testMixedModelTypes(self, model_type):
examples = [self._makeExample(age=3.0, language='english', label=1.0)]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_config = config.EvalConfig(model_specs=[
config.ModelSpec(name='model1'),
config.ModelSpec(name='model2', model_type=model_type)
])
eval_shared_models = [
model_eval_lib.default_eval_shared_model(
model_name='model1',
eval_saved_model_path='/model1/path',
eval_config=eval_config),
model_eval_lib.default_eval_shared_model(
model_name='model2',
eval_saved_model_path='/model2/path',
eval_config=eval_config)
]
with self.assertRaisesRegex(
NotImplementedError, 'support for mixing .* models is not implemented'):
model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_models,
data_location=data_location,
output_path=self._getTempDir())
def testRunModelAnalysisExtraFieldsPlusFeatureExtraction(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0, my_slice='a'),
self._makeExample(age=3.0, language='chinese', label=0.0, my_slice='a'),
self._makeExample(age=4.0, language='english', label=1.0, my_slice='b'),
self._makeExample(age=5.0, language='chinese', label=1.0, my_slice='c'),
self._makeExample(age=5.0, language='hindi', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [slicer_lib.SingleSliceSpec(columns=['my_slice'])]
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age')
extractors_with_feature_extraction = [
legacy_predict_extractor.PredictExtractor(
eval_shared_model, desired_batch_size=3, materialize=False),
legacy_feature_extractor.FeatureExtractor(
extract_source=constants.INPUT_KEY,
extract_dest=constants.FEATURES_PREDICTIONS_LABELS_KEY),
slice_key_extractor.SliceKeyExtractor(
slice_spec=slicing_specs, materialize=False)
]
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age'),
data_location=data_location,
output_path=self._getTempDir(),
extractors=extractors_with_feature_extraction,
slice_spec=slicing_specs)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(('my_slice', 'a'),): {
'accuracy': {
'doubleValue': 1.0
},
'my_mean_label': {
'doubleValue': 0.5
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 6.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
},
(('my_slice', 'b'),): {
'accuracy': {
'doubleValue': 1.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 4.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 1.0
},
},
(('my_slice', 'c'),): {
'accuracy': {
'doubleValue': 0.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 5.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 1.0
},
},
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['my_slice']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
self.assertFalse(eval_result.plots)
def testRunModelAnalysis(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0),
self._makeExample(age=5.0, language='hindi', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age'),
data_location=data_location,
output_path=self._getTempDir(),
slice_spec=slicing_specs,
min_slice_size=2)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(('language', 'hindi'),): {
u'__ERROR__': {
'debugMessage':
u'Example count for this slice key is lower than the '
u'minimum required value: 2. No data is aggregated for '
u'this slice.'
},
},
(('language', 'chinese'),): {
'accuracy': {
'doubleValue': 0.5
},
'my_mean_label': {
'doubleValue': 0.5
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 8.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
},
(('language', 'english'),): {
'accuracy': {
'doubleValue': 1.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 7.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['language']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
self.assertFalse(eval_result.plots)
def testRunModelAnalysisWithCustomizations(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0),
self._makeExample(age=5.0, language='hindi', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [config.SlicingSpec(feature_keys=['language'])]
options = config.Options()
options.min_slice_size.value = 2
eval_config = config.EvalConfig(
model_specs=[config.ModelSpec(model_type='my_model_type')],
slicing_specs=slicing_specs,
options=options)
    # Use the default model_loader to test passing a custom_model_loader.
model_loader = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location,
example_weight_key='age').model_loader
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, custom_model_loader=model_loader)
    # Use PredictExtractor to test passing a custom_predict_extractor.
extractors = model_eval_lib.default_extractors(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
custom_predict_extractor=legacy_predict_extractor.PredictExtractor(
eval_shared_model=eval_shared_model, eval_config=eval_config))
eval_result = model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir(),
extractors=extractors)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(('language', 'hindi'),): {
u'__ERROR__': {
'debugMessage':
u'Example count for this slice key is lower than the '
u'minimum required value: 2. No data is aggregated for '
u'this slice.'
},
},
(('language', 'chinese'),): {
'accuracy': {
'doubleValue': 0.5
},
'my_mean_label': {
'doubleValue': 0.5
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 8.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
},
(('language', 'english'),): {
'accuracy': {
'doubleValue': 1.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 7.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['language']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
def testRunModelAnalysisMultipleModels(self):
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
model_specs = [
config.ModelSpec(
name='model1', signature_name='eval', example_weight_key='age'),
config.ModelSpec(
name='model2', signature_name='eval', example_weight_key='age')
]
metrics_specs = [
config.MetricsSpec(
metrics=[
config.MetricConfig(class_name='ExampleCount'),
config.MetricConfig(class_name='WeightedExampleCount')
],
model_names=['model1', 'model2'])
]
slicing_specs = [config.SlicingSpec(feature_values={'language': 'english'})]
options = config.Options()
eval_config = config.EvalConfig(
model_specs=model_specs,
metrics_specs=metrics_specs,
slicing_specs=slicing_specs,
options=options)
model_location1 = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
model1 = model_eval_lib.default_eval_shared_model(
model_name='model1',
eval_saved_model_path=model_location1,
eval_config=eval_config)
model_location2 = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
model2 = model_eval_lib.default_eval_shared_model(
model_name='model2',
eval_saved_model_path=model_location2,
eval_config=eval_config)
eval_shared_models = [model1, model2]
eval_results = model_eval_lib.run_model_analysis(
eval_shared_model=eval_shared_models,
eval_config=eval_config,
data_location=data_location,
output_path=self._getTempDir())
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected_result_1 = {
(('language', 'english'),): {
'example_count': {
'doubleValue': 2.0
},
'weighted_example_count': {
'doubleValue': 7.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
expected_result_2 = {
(('language', 'english'),): {
'example_count': {
'doubleValue': 2.0
},
'weighted_example_count': {
'doubleValue': 7.0
},
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertLen(eval_results._results, 2)
eval_result_1 = eval_results._results[0]
eval_result_2 = eval_results._results[1]
self.assertEqual(eval_result_1.model_location, model_location1.decode())
self.assertEqual(eval_result_2.model_location, model_location2.decode())
self.assertEqual(eval_result_1.data_location, data_location)
self.assertEqual(eval_result_2.data_location, data_location)
self.assertEqual(eval_result_1.config.slicing_specs[0],
config.SlicingSpec(feature_values={'language': 'english'}))
self.assertEqual(eval_result_2.config.slicing_specs[0],
config.SlicingSpec(feature_values={'language': 'english'}))
self.assertMetricsAlmostEqual(eval_result_1.slicing_metrics,
expected_result_1)
self.assertMetricsAlmostEqual(eval_result_2.slicing_metrics,
expected_result_2)
def testRunModelAnalysisWithModelAgnosticPredictions(self):
examples = [
self._makeExample(
age=3.0, language='english', label=1.0, prediction=0.9),
self._makeExample(
age=3.0, language='chinese', label=0.0, prediction=0.4),
self._makeExample(
age=4.0, language='english', label=1.0, prediction=0.7),
self._makeExample(
age=5.0, language='chinese', label=1.0, prediction=0.2)
]
data_location = self._writeTFExamplesToTFRecords(examples)
model_specs = [
config.ModelSpec(
prediction_key='prediction',
label_key='label',
example_weight_key='age')
]
metrics = [
config.MetricConfig(class_name='ExampleCount'),
config.MetricConfig(class_name='WeightedExampleCount'),
config.MetricConfig(class_name='BinaryAccuracy')
]
slicing_specs = [config.SlicingSpec(feature_keys=['language'])]
eval_config = config.EvalConfig(
model_specs=model_specs,
metrics_specs=[config.MetricsSpec(metrics=metrics)],
slicing_specs=slicing_specs)
eval_result = model_eval_lib.run_model_analysis(
eval_config=eval_config,
data_location=data_location,
output_path=self._getTempDir())
expected = {
(('language', 'chinese'),): {
'binary_accuracy': {
'doubleValue': 0.375
},
'weighted_example_count': {
'doubleValue': 8.0
},
'example_count': {
'doubleValue': 2.0
},
},
(('language', 'english'),): {
'binary_accuracy': {
'doubleValue': 1.0
},
'weighted_example_count': {
'doubleValue': 7.0
},
'example_count': {
'doubleValue': 2.0
},
}
}
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['language']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
@parameterized.named_parameters(
('tf_keras', constants.TF_KERAS),
('tf_lite', constants.TF_LITE),
('tf_js', constants.TF_JS),
('baseline_missing', constants.TF_KERAS, True),
('rubber_stamp', constants.TF_KERAS, True, True),
('tf_keras_custom_metrics', constants.TF_KERAS, False, False, True),
)
def testRunModelAnalysisWithKerasModel(self,
model_type,
remove_baseline=False,
rubber_stamp=False,
add_custom_metrics=False):
# Custom metrics not supported in TFv1
if _TF_MAJOR_VERSION < 2:
add_custom_metrics = False
def _build_keras_model(eval_config,
export_name='export_dir',
rubber_stamp=False):
input_layer = tf.keras.layers.Input(shape=(28 * 28,), name='data')
output_layer = tf.keras.layers.Dense(
10, activation=tf.nn.softmax)(
input_layer)
model = tf.keras.models.Model(input_layer, output_layer)
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=.001),
loss=tf.keras.losses.categorical_crossentropy)
if add_custom_metrics:
model.add_metric(tf.reduce_sum(input_layer), 'custom')
model_location = os.path.join(self._getTempDir(), export_name)
if model_type == constants.TF_LITE:
converter = tf.compat.v2.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tf.io.gfile.makedirs(model_location)
with tf.io.gfile.GFile(os.path.join(model_location, 'tflite'),
'wb') as f:
f.write(tflite_model)
elif model_type == constants.TF_JS:
src_model_path = tempfile.mkdtemp()
model.save(src_model_path, save_format='tf')
tfjs_converter.convert([
'--input_format=tf_saved_model',
'--saved_model_tags=serve',
'--signature_name=serving_default',
src_model_path,
model_location,
])
else:
model.save(model_location, save_format='tf')
return model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location,
eval_config=eval_config,
rubber_stamp=rubber_stamp)
examples = [
self._makeExample(
data=[0.0] * 28 * 28,
label=[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
self._makeExample(
data=[1.0] * 28 * 28,
label=[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
self._makeExample(
data=[1.0] * 28 * 28,
label=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
]
data_location = self._writeTFExamplesToTFRecords(examples)
schema = text_format.Parse(
"""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "data"
value {
dense_tensor {
column_name: "data"
shape { dim { size: 784 } }
}
}
}
}
}
feature {
name: "data"
type: FLOAT
}
feature {
name: "label"
type: FLOAT
}
""", schema_pb2.Schema())
metrics_spec = config.MetricsSpec()
for metric in (tf.keras.metrics.AUC(),):
cfg = tf.keras.utils.serialize_keras_object(metric)
metrics_spec.metrics.append(
config.MetricConfig(
class_name=cfg['class_name'], config=json.dumps(cfg['config'])))
tf.keras.backend.clear_session()
slicing_specs = [
config.SlicingSpec(),
config.SlicingSpec(feature_keys=['non_existent_slice'])
]
metrics_spec.metrics.append(
config.MetricConfig(
class_name='WeightedExampleCount',
per_slice_thresholds=[
config.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
threshold=config.MetricThreshold(
value_threshold=config.GenericValueThreshold(
lower_bound={'value': 1}))),
# Change thresholds would be ignored when rubber stamp is true.
config.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
threshold=config.MetricThreshold(
change_threshold=config.GenericChangeThreshold(
direction=config.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': 1})))
]))
for class_id in (0, 5):
metrics_spec.binarize.class_ids.values.append(class_id)
eval_config = config.EvalConfig(
model_specs=[config.ModelSpec(label_key='label')],
metrics_specs=[metrics_spec])
if model_type != constants.TF_KERAS:
for s in eval_config.model_specs:
s.model_type = model_type
model = _build_keras_model(eval_config, rubber_stamp=rubber_stamp)
baseline = _build_keras_model(eval_config, 'baseline_export')
if remove_baseline:
eval_shared_model = model
else:
eval_shared_model = {'candidate': model, 'baseline': baseline}
output_path = self._getTempDir()
eval_results = model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=output_path,
schema=schema)
    # Directly check the validation file since it is not in EvalResult.
validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)
self.assertTrue(os.path.exists(validations_file))
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(
validation_result_pb2.ValidationResult.FromString(record))
self.assertLen(validation_records, 1)
# Change thresholds ignored when rubber stamping
expected_result = text_format.Parse(
"""
validation_ok: false
rubber_stamp: %s
missing_slices: {
feature_keys: "non_existent_slice"
}
validation_details {
slicing_details {
slicing_spec {
}
num_matching_slices: 1
}
}""" % rubber_stamp, validation_result_pb2.ValidationResult())
# Normal run with change threshold not satisfied.
if not rubber_stamp and not remove_baseline:
text_format.Parse(
"""
metric_validations_per_slice {
slice_key {}
failures {
metric_key {
name: "weighted_example_count"
sub_key { class_id {} }
model_name: "candidate"
is_diff: true
}
metric_threshold {
change_threshold {
absolute { value: 1 }
direction: HIGHER_IS_BETTER
}
}
metric_value { double_value {} }
}
failures {
metric_key {
name: "weighted_example_count"
sub_key {
class_id {
value: 5
}
}
model_name: "candidate"
is_diff: true
}
metric_threshold {
change_threshold {
absolute { value: 1}
direction: HIGHER_IS_BETTER
}
}
metric_value { double_value {} }
}
}""", expected_result)
self.assertProtoEquals(expected_result, validation_records[0])
def check_eval_result(eval_result, model_location):
self.assertEqual(eval_result.model_location, model_location)
self.assertEqual(eval_result.data_location, data_location)
self.assertLen(eval_result.slicing_metrics, 1)
got_slice_key, got_metrics = eval_result.slicing_metrics[0]
self.assertEqual(got_slice_key, ())
self.assertIn('', got_metrics) # output_name
got_metrics = got_metrics['']
expected_metrics = {
'classId:0': {
'auc': True,
},
'classId:5': {
'auc': True,
},
}
if (model_type not in (constants.TF_LITE, constants.TF_JS) and
_TF_MAJOR_VERSION >= 2):
expected_metrics[''] = {'loss': True}
if add_custom_metrics:
expected_metrics['']['custom'] = True
for class_id in expected_metrics:
self.assertIn(class_id, got_metrics)
for k in expected_metrics[class_id]:
self.assertIn(k, got_metrics[class_id])
# TODO(b/173657964): assert exception for the missing baseline but non
# rubber stamping test.
if rubber_stamp or remove_baseline:
self.assertIsInstance(eval_results, view_types.EvalResult)
check_eval_result(eval_results, model.model_path)
else:
self.assertLen(eval_results._results, 2)
eval_result_0, eval_result_1 = eval_results._results
check_eval_result(eval_result_0, model.model_path)
check_eval_result(eval_result_1, baseline.model_path)
def testRunModelAnalysisWithKerasMultiOutputModel(self):
def _build_keras_model(eval_config, export_name='export_dir'):
layers_per_output = {}
for output_name in ('output_1', 'output_2'):
layers_per_output[output_name] = tf.keras.layers.Input(
shape=(1,), name=output_name)
model = tf.keras.models.Model(layers_per_output, layers_per_output)
model.compile(loss=tf.keras.losses.categorical_crossentropy)
model_location = os.path.join(self._getTempDir(), export_name)
model.save(model_location, save_format='tf')
return model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location,
eval_config=eval_config,
rubber_stamp=False)
examples = [
self._makeExample(output_1=1.0, output_2=0.0, label_1=0.0, label_2=0.0),
self._makeExample(output_1=0.7, output_2=0.3, label_1=1.0, label_2=1.0),
self._makeExample(output_1=0.5, output_2=0.8, label_1=0.0, label_2=1.0),
]
data_location = self._writeTFExamplesToTFRecords(examples)
metrics_spec = config.MetricsSpec(
output_names=['output_1', 'output_2'],
output_weights={
'output_1': 1.0,
'output_2': 1.0
})
for metric in (tf.keras.metrics.AUC(),):
cfg = tf.keras.utils.serialize_keras_object(metric)
metrics_spec.metrics.append(
config.MetricConfig(
class_name=cfg['class_name'], config=json.dumps(cfg['config'])))
slicing_specs = [
config.SlicingSpec(),
config.SlicingSpec(feature_keys=['non_existent_slice'])
]
metrics_spec.metrics.append(
config.MetricConfig(
class_name='WeightedExampleCount',
per_slice_thresholds=[
config.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
threshold=config.MetricThreshold(
value_threshold=config.GenericValueThreshold(
lower_bound={'value': 1}))),
# Change thresholds would be ignored when rubber stamp is true.
config.PerSliceMetricThreshold(
slicing_specs=slicing_specs,
threshold=config.MetricThreshold(
change_threshold=config.GenericChangeThreshold(
direction=config.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': 1})))
]))
eval_config = config.EvalConfig(
model_specs=[
config.ModelSpec(label_keys={
'output_1': 'label_1',
'output_2': 'label_2'
})
],
metrics_specs=[metrics_spec])
model = _build_keras_model(eval_config)
baseline = _build_keras_model(eval_config, 'baseline_export')
eval_shared_model = {'candidate': model, 'baseline': baseline}
output_path = self._getTempDir()
eval_results = model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=output_path)
    # Directly check the validation file since it is not in EvalResult.
validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)
self.assertTrue(os.path.exists(validations_file))
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(
validation_result_pb2.ValidationResult.FromString(record))
self.assertLen(validation_records, 1)
expected_result = text_format.Parse(
"""
metric_validations_per_slice {
slice_key {}
failures {
metric_key {
name: "weighted_example_count"
model_name: "candidate"
output_name: "output_1"
is_diff: true
}
metric_threshold {
change_threshold {
absolute { value: 1 }
direction: HIGHER_IS_BETTER
}
}
metric_value { double_value {} }
}
failures {
metric_key {
name: "weighted_example_count"
model_name: "candidate"
output_name: "output_2"
is_diff: true
}
metric_threshold {
change_threshold {
absolute { value: 1}
direction: HIGHER_IS_BETTER
}
}
metric_value { double_value {} }
}
}
missing_slices {
feature_keys: "non_existent_slice"
}
validation_details {
slicing_details {
slicing_spec {}
num_matching_slices: 1
}
}""", validation_result_pb2.ValidationResult())
self.assertProtoEquals(expected_result, validation_records[0])
def check_eval_result(eval_result, model_location):
self.assertEqual(eval_result.model_location, model_location)
self.assertEqual(eval_result.data_location, data_location)
self.assertLen(eval_result.slicing_metrics, 1)
got_slice_key, got_metrics = eval_result.slicing_metrics[0]
self.assertEqual(got_slice_key, ())
self.assertIn('output_1', got_metrics)
self.assertIn('auc', got_metrics['output_1'][''])
self.assertIn('output_2', got_metrics)
self.assertIn('auc', got_metrics['output_2'][''])
# Aggregate metrics
self.assertIn('', got_metrics)
self.assertIn('auc', got_metrics[''][''])
# TODO(b/173657964): assert exception for the missing baseline but non
# rubber stamping test.
self.assertLen(eval_results._results, 2)
eval_result_0, eval_result_1 = eval_results._results
check_eval_result(eval_result_0, model.model_path)
check_eval_result(eval_result_1, baseline.model_path)
def testRunModelAnalysisWithQueryBasedMetrics(self):
input_layer = tf.keras.layers.Input(shape=(1,), name='age')
output_layer = tf.keras.layers.Dense(
1, activation=tf.nn.sigmoid)(
input_layer)
model = tf.keras.models.Model(input_layer, output_layer)
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=.001),
loss=tf.keras.losses.binary_crossentropy)
features = {'age': [[20.0]]}
labels = [[1]]
example_weights = [1.0]
dataset = tf.data.Dataset.from_tensor_slices(
(features, labels, example_weights))
dataset = dataset.shuffle(buffer_size=1).repeat().batch(1)
model.fit(dataset, steps_per_epoch=1)
model_location = os.path.join(self._getTempDir(), 'export_dir')
model.save(model_location, save_format='tf')
schema = text_format.Parse(
"""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "age"
value {
dense_tensor {
column_name: "age"
shape { dim { size: 1 } }
}
}
}
tensor_representation {
key: "language"
value {
dense_tensor {
column_name: "language"
shape { dim { size: 1 } }
}
}
}
}
}
feature {
name: "age"
type: FLOAT
}
feature {
name: "language"
type: BYTES
}
feature {
name: "label"
type: FLOAT
}
""", schema_pb2.Schema())
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=0.0),
self._makeExample(age=3.0, language='english', label=0.0),
self._makeExample(age=5.0, language='chinese', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [config.SlicingSpec()]
    # Test with both a TFMA metric (NDCG) and a Keras metric (Recall).
metrics = [
ndcg.NDCG(gain_key='age', name='ndcg', top_k_list=[1, 2]),
tf.keras.metrics.Recall(top_k=1),
]
    # If tensorflow-ranking is imported, add MRRMetric.
if _TFR_IMPORTED:
metrics.append(tfr.keras.metrics.MRRMetric())
metrics_specs = metric_specs.specs_from_metrics(
metrics, query_key='language', include_weighted_example_count=True)
metrics_specs.append(
config.MetricsSpec(metrics=[
config.MetricConfig(
class_name='WeightedExampleCount',
threshold=config.MetricThreshold(
value_threshold=config.GenericValueThreshold(
lower_bound={'value': 0})))
]))
eval_config = config.EvalConfig(
model_specs=[config.ModelSpec(label_key='label')],
slicing_specs=slicing_specs,
metrics_specs=metrics_specs)
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, eval_config=eval_config)
output_path = self._getTempDir()
eval_result = model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=output_path,
evaluators=[
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_model)
],
schema=schema)
    # Directly check the validation file since it is not in EvalResult.
validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)
self.assertTrue(os.path.exists(validations_file))
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(
validation_result_pb2.ValidationResult.FromString(record))
self.assertLen(validation_records, 1)
self.assertTrue(validation_records[0].validation_ok)
self.assertEqual(eval_result.model_location, model_location)
self.assertEqual(eval_result.data_location, data_location)
self.assertLen(eval_result.slicing_metrics, 1)
got_slice_key, got_metrics = eval_result.slicing_metrics[0]
self.assertEqual(got_slice_key, ())
self.assertIn('', got_metrics) # output_name
got_metrics = got_metrics['']
expected_metrics = {
'': {
'example_count': True,
'weighted_example_count': True,
},
'topK:1': {
'ndcg': True,
'recall': True,
},
'topK:2': {
'ndcg': True,
},
}
if _TFR_IMPORTED:
expected_metrics['']['mrr_metric'] = True
for group in expected_metrics:
self.assertIn(group, got_metrics)
for k in expected_metrics[group]:
self.assertIn(k, got_metrics[group])
def testRunModelAnalysisWithLegacyQueryExtractor(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=0.0),
self._makeExample(age=5.0, language='chinese', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [slicer_lib.SingleSliceSpec()]
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age')
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir(),
evaluators=[
legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model),
legacy_query_based_metrics_evaluator.QueryBasedMetricsEvaluator(
query_id='language',
prediction_key='logistic',
combine_fns=[
query_statistics.QueryStatisticsCombineFn(),
legacy_ndcg.NdcgMetricCombineFn(
at_vals=[1], gain_key='label', weight_key='')
]),
],
slice_spec=slicing_specs)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(): {
'post_export_metrics/total_queries': {
'doubleValue': 2.0
},
'post_export_metrics/min_documents': {
'doubleValue': 2.0
},
'post_export_metrics/max_documents': {
'doubleValue': 2.0
},
'post_export_metrics/total_documents': {
'doubleValue': 4.0
},
'post_export_metrics/ndcg@1': {
'doubleValue': 0.5
},
'post_export_metrics/example_weight': {
'doubleValue': 15.0
},
'post_export_metrics/example_count': {
'doubleValue': 4.0
},
}
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0], config.SlicingSpec())
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
self.assertFalse(eval_result.plots)
def testRunModelAnalysisWithUncertainty(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0),
self._makeExample(age=5.0, language='hindi', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age'),
data_location=data_location,
output_path=self._getTempDir(),
slice_spec=slicing_specs,
compute_confidence_intervals=True,
min_slice_size=2)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(('language', 'hindi'),): {
u'__ERROR__': {
'debugMessage':
u'Example count for this slice key is lower than the '
u'minimum required value: 2. No data is aggregated for '
u'this slice.'
},
},
(('language', 'chinese'),): {
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 8.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
},
(('language', 'english'),): {
'accuracy': {
'boundedValue': {
'value': 1.0,
'lowerBound': 1.0,
'upperBound': 1.0,
'methodology': 'POISSON_BOOTSTRAP'
}
},
'my_mean_label': {
'boundedValue': {
'value': 1.0,
'lowerBound': 1.0,
'upperBound': 1.0,
'methodology': 'POISSON_BOOTSTRAP'
}
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 7.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['language']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
self.assertFalse(eval_result.plots)
def testRunModelAnalysisWithDeterministicConfidenceIntervals(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0),
self._makeExample(age=5.0, language='hindi', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age'),
data_location=data_location,
output_path=self._getTempDir(),
slice_spec=slicing_specs,
compute_confidence_intervals=True,
min_slice_size=2,
random_seed_for_testing=_TEST_SEED)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(('language', 'hindi'),): {
u'__ERROR__': {
'debugMessage':
u'Example count for this slice key is lower than the '
u'minimum required value: 2. No data is aggregated for '
u'this slice.'
},
},
(('language', 'chinese'),): {
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 8.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
},
(('language', 'english'),): {
'accuracy': {
'boundedValue': {
'value': 1.0,
'lowerBound': 1.0,
'upperBound': 1.0,
'methodology': 'POISSON_BOOTSTRAP'
}
},
'my_mean_label': {
'boundedValue': {
'value': 1.0,
'lowerBound': 1.0,
'upperBound': 1.0,
'methodology': 'POISSON_BOOTSTRAP'
}
},
metric_keys.EXAMPLE_WEIGHT: {
'doubleValue': 7.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertEqual(eval_result.model_location, model_location.decode())
self.assertEqual(eval_result.data_location, data_location)
self.assertEqual(eval_result.config.slicing_specs[0],
config.SlicingSpec(feature_keys=['language']))
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
for key, value in eval_result.slicing_metrics:
if (('language', 'english'),) == key:
metric = value['']['']['average_loss']
self.assertAlmostEqual(
0.171768754720, metric['boundedValue']['value'], delta=0.1)
metric = value['']['']['auc_precision_recall']
self.assertAlmostEqual(
0.99999940395, metric['boundedValue']['value'], delta=0.1)
self.assertFalse(eval_result.plots)
def testRunModelAnalysisWithSchema(self):
model_location = self._exportEvalSavedModel(
linear_regressor.simple_linear_regressor)
examples = [
self._makeExample(age=3.0, language='english', label=2.0),
self._makeExample(age=3.0, language='chinese', label=1.0),
self._makeExample(age=4.0, language='english', label=2.0),
self._makeExample(age=5.0, language='chinese', label=2.0),
self._makeExample(age=5.0, language='hindi', label=2.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_config = config.EvalConfig(
model_specs=[config.ModelSpec(label_key='label')],
metrics_specs=metric_specs.specs_from_metrics(
[calibration_plot.CalibrationPlot(num_buckets=4)]))
schema = text_format.Parse(
"""
feature {
name: "label"
type: INT
int_domain {
min: 1
max: 2
}
}
""", schema_pb2.Schema())
eval_result = model_eval_lib.run_model_analysis(
eval_config=eval_config,
schema=schema,
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location, example_weight_key='age'),
data_location=data_location,
output_path=self._getTempDir())
expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)
self.assertLen(eval_result.plots, 1)
slice_key, plots = eval_result.plots[0]
self.assertEqual((), slice_key)
got_buckets = plots['']['']['calibrationHistogramBuckets']['buckets']
# buckets include (-inf, left) and (right, inf) by default, but we are
# interested in the values of left and right
self.assertEqual(1.0, got_buckets[1]['lowerThresholdInclusive'])
self.assertEqual(2.0, got_buckets[-2]['upperThresholdExclusive'])
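  # Illustrative note (not part of the original test file): with
  # CalibrationPlot(num_buckets=4) and the label domain [1, 2] read from the
  # schema, the bucket list is expected to look roughly like
  #
  #   buckets[0]    -> underflow bucket ending at 1.0
  #   buckets[1:-1] -> evenly spaced buckets covering [1.0, 2.0)
  #   buckets[-1]   -> overflow bucket starting at 2.0
  #
  # which is what the checks on buckets[1] and buckets[-2] above rely on.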
def testRunModelAnalysisWithPlots(self):
model_location = self._exportEvalSavedModel(
fixed_prediction_estimator.simple_fixed_prediction_estimator)
examples = [
self._makeExample(prediction=0.0, label=1.0),
self._makeExample(prediction=0.7, label=0.0),
self._makeExample(prediction=0.8, label=1.0),
self._makeExample(prediction=1.0, label=1.0),
self._makeExample(prediction=1.0, label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location,
add_metrics_callbacks=[post_export_metrics.auc_plots()])
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir())
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}
expected_matrix = {
'threshold': 0.8,
'falseNegatives': 2.0,
'trueNegatives': 1.0,
'truePositives': 2.0,
'precision': 1.0,
'recall': 0.5
}
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)
self.assertLen(eval_result.plots, 1)
slice_key, plots = eval_result.plots[0]
self.assertEqual((), slice_key)
self.assertDictElementsAlmostEqual(
plots['']['']['confusionMatrixAtThresholds']['matrices'][8001],
expected_matrix)
def testRunModelAnalysisWithMultiplePlots(self):
model_location = self._exportEvalSavedModel(
fixed_prediction_estimator.simple_fixed_prediction_estimator)
examples = [
self._makeExample(prediction=0.0, label=1.0),
self._makeExample(prediction=0.7, label=0.0),
self._makeExample(prediction=0.8, label=1.0),
self._makeExample(prediction=1.0, label=1.0),
self._makeExample(prediction=1.0, label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location,
add_metrics_callbacks=[
post_export_metrics.auc_plots(),
post_export_metrics.auc_plots(metric_tag='test')
])
eval_result = model_eval_lib.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=data_location,
output_path=self._getTempDir())
    # We only check some of the metrics to ensure that the end-to-end
    # pipeline works.
expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}
expected_matrix = {
'threshold': 0.8,
'falseNegatives': 2.0,
'trueNegatives': 1.0,
'truePositives': 2.0,
'precision': 1.0,
'recall': 0.5
}
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)
self.assertLen(eval_result.plots, 1)
slice_key, plots = eval_result.plots[0]
self.assertEqual((), slice_key)
self.assertDictElementsAlmostEqual(
plots['']['']['post_export_metrics']['confusionMatrixAtThresholds']
['matrices'][8001], expected_matrix)
self.assertDictElementsAlmostEqual(
plots['']['']['post_export_metrics/test']['confusionMatrixAtThresholds']
['matrices'][8001], expected_matrix)
def testRunModelAnalysisForCSVText(self):
model_location = self._exportEvalSavedModel(
csv_linear_classifier.simple_csv_linear_classifier)
examples = [
'3.0,english,1.0', '3.0,chinese,0.0', '4.0,english,1.0',
'5.0,chinese,1.0'
]
data_location = self._writeCSVToTextFile(examples)
eval_config = config.EvalConfig()
eval_result = model_eval_lib.run_model_analysis(
eval_config=eval_config,
eval_shared_model=model_eval_lib.default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
file_format='text',
output_path=self._getTempDir())
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected = {
(): {
'accuracy': {
'doubleValue': 0.75
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 4.0
}
}
}
self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)
def testMultipleModelAnalysis(self):
model_location_1 = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
model_location_2 = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
examples = [
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='chinese', label=0.0),
self._makeExample(age=4.0, language='english', label=1.0),
self._makeExample(age=5.0, language='chinese', label=1.0)
]
data_location = self._writeTFExamplesToTFRecords(examples)
eval_config = config.EvalConfig(slicing_specs=[
config.SlicingSpec(feature_values={'language': 'english'})
])
eval_results = model_eval_lib.multiple_model_analysis(
[model_location_1, model_location_2],
data_location,
eval_config=eval_config)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
self.assertLen(eval_results._results, 2)
expected_result_1 = {
(('language', 'english'),): {
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
expected_result_2 = {
(('language', 'english'),): {
'my_mean_label': {
'doubleValue': 1.0
},
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
self.assertMetricsAlmostEqual(eval_results._results[0].slicing_metrics,
expected_result_1)
self.assertMetricsAlmostEqual(eval_results._results[1].slicing_metrics,
expected_result_2)
def testMultipleDataAnalysis(self):
model_location = self._exportEvalSavedModel(
linear_classifier.simple_linear_classifier)
data_location_1 = self._writeTFExamplesToTFRecords([
self._makeExample(age=3.0, language='english', label=1.0),
self._makeExample(age=3.0, language='english', label=0.0),
self._makeExample(age=5.0, language='chinese', label=1.0)
])
data_location_2 = self._writeTFExamplesToTFRecords(
[self._makeExample(age=4.0, language='english', label=1.0)])
eval_config = config.EvalConfig(slicing_specs=[
config.SlicingSpec(feature_values={'language': 'english'})
])
eval_results = model_eval_lib.multiple_data_analysis(
model_location, [data_location_1, data_location_2],
eval_config=eval_config)
self.assertLen(eval_results._results, 2)
# We only check some of the metrics to ensure that the end-to-end
# pipeline works.
expected_result_1 = {
(('language', 'english'),): {
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 2.0
},
}
}
expected_result_2 = {
(('language', 'english'),): {
metric_keys.EXAMPLE_COUNT: {
'doubleValue': 1.0
},
}
}
self.assertMetricsAlmostEqual(eval_results._results[0].slicing_metrics,
expected_result_1)
self.assertMetricsAlmostEqual(eval_results._results[1].slicing_metrics,
expected_result_2)
def testLoadValidationResult(self):
result = validation_result_pb2.ValidationResult(validation_ok=True)
path = os.path.join(absltest.get_default_test_tmpdir(), 'results.tfrecord')
with tf.io.TFRecordWriter(path) as writer:
writer.write(result.SerializeToString())
loaded_result = model_eval_lib.load_validation_result(path)
self.assertTrue(loaded_result.validation_ok)
def testLoadValidationResultDir(self):
result = validation_result_pb2.ValidationResult(validation_ok=True)
path = os.path.join(absltest.get_default_test_tmpdir(),
constants.VALIDATIONS_KEY)
with tf.io.TFRecordWriter(path) as writer:
writer.write(result.SerializeToString())
loaded_result = model_eval_lib.load_validation_result(os.path.dirname(path))
self.assertTrue(loaded_result.validation_ok)
def testLoadValidationResultEmptyFile(self):
path = os.path.join(absltest.get_default_test_tmpdir(),
constants.VALIDATIONS_KEY)
with tf.io.TFRecordWriter(path):
pass
with self.assertRaises(AssertionError):
model_eval_lib.load_validation_result(path)
def testAnalyzeRawData(self):
# Data
# age language label prediction
# 17 english 0 0
# 30 spanish 1 1
dict_data = [{
'age': 17,
'language': 'english',
'prediction': 0,
'label': 0
}, {
'age': 30,
'language': 'spanish',
'prediction': 1,
'label': 1
}]
df_data = pd.DataFrame(dict_data)
# Expected Output
expected_slicing_metrics = {
(('language', 'english'),): {
'': {
'': {
'accuracy': {
'doubleValue': 1.0
},
'example_count': {
'doubleValue': 1.0
}
}
}
},
(('language', 'spanish'),): {
'': {
'': {
'accuracy': {
'doubleValue': 1.0
},
'example_count': {
'doubleValue': 1.0
}
}
}
},
(): {
'': {
'': {
'accuracy': {
'doubleValue': 1.0
},
'example_count': {
'doubleValue': 2.0
}
}
}
}
}
# Actual Output
eval_config = text_format.Parse(
"""
model_specs {
label_key: 'label'
prediction_key: 'prediction'
}
metrics_specs {
metrics { class_name: "Accuracy" }
metrics { class_name: "ExampleCount" }
}
slicing_specs {}
slicing_specs {
feature_keys: 'language'
}
""", config.EvalConfig())
eval_result = model_eval_lib.analyze_raw_data(df_data, eval_config)
# Compare Actual and Expected
self.assertEqual(
len(eval_result.slicing_metrics), len(expected_slicing_metrics))
for slicing_metric in eval_result.slicing_metrics:
slice_key, slice_val = slicing_metric
self.assertIn(slice_key, expected_slicing_metrics)
self.assertDictEqual(slice_val, expected_slicing_metrics[slice_key])
def testAnalyzeRawDataWithoutPrediction(self):
model_specs = [
config.ModelSpec(prediction_key='nonexistent_prediction_key')
]
metrics_specs = [
config.MetricsSpec(metrics=[config.MetricConfig(class_name='Accuracy')])
]
eval_config = config.EvalConfig(
model_specs=model_specs, metrics_specs=metrics_specs)
df_data = pd.DataFrame([{
'prediction': 0,
'label': 0,
}])
with self.assertRaises(KeyError):
model_eval_lib.analyze_raw_data(df_data, eval_config)
def testAnalyzeRawDataWithoutLabel(self):
model_specs = [config.ModelSpec(prediction_key='nonexistent_label_key')]
metrics_specs = [
config.MetricsSpec(metrics=[config.MetricConfig(class_name='Accuracy')])
]
eval_config = config.EvalConfig(
model_specs=model_specs, metrics_specs=metrics_specs)
df_data = pd.DataFrame([{
'prediction': 0,
'label': 0,
}])
with self.assertRaises(KeyError):
model_eval_lib.analyze_raw_data(df_data, eval_config)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|
apache-2.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/metrics/pairwise.py
|
8
|
46732
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
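# Illustrative sketch (not part of the original module): the helper above is
# what promotes plain Python lists to validated 2-D float arrays and enforces
# matching feature dimensions.  ``_example_check_pairwise_arrays`` is a
# hypothetical name used only for illustration; nothing in the library calls it.
def _example_check_pairwise_arrays():
    # Lists are promoted to float ndarrays; both inputs end up with 2 columns.
    X, Y = check_pairwise_arrays([[0., 1.]], [[1., 1.], [2., 0.]])
    assert X.shape == (1, 2) and Y.shape == (2, 2)
    # Passing Y=None makes the returned Y an alias of the validated X.
    X2, Y2 = check_pairwise_arrays([[0., 1.]], None)
    assert X2 is Y2
    return X, Y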
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
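# Illustrative sketch (not part of the original module): the docstring above
# notes that the squared norms dot(x, x) / dot(y, y) can be reused when one
# argument stays fixed.  ``_example_reuse_precomputed_norms`` is a hypothetical
# helper shown only for illustration and is never called by the library.
def _example_reuse_precomputed_norms():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    # Squared row norms of X are computed once ...
    XX = row_norms(X, squared=True)
    # ... and reused for several query batches against the same X.
    queries = [rng.rand(2, 3) for _ in range(3)]
    return [euclidean_distances(Q, X, Y_norm_squared=XX) for Q in queries]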
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
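# A minimal usage sketch for pairwise_distances_argmin_min, assuming small
# dense inputs and the default euclidean metric (the values follow directly
# from the definition above):
#
#     import numpy as np
#     X = np.array([[0., 0.], [1., 1.]])
#     Y = np.array([[1., 0.], [2., 2.]])
#     idx, dist = pairwise_distances_argmin_min(X, Y)
#     # idx  -> array([0, 0])   (Y[0] is the closest row for both rows of X)
#     # dist -> array([1., 1.])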
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Array containing points. Shape (n_samples1, n_features).
Y : array-like
Array containing points. Shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
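# A minimal sketch of cosine_distances on two orthogonal unit vectors; when a
# single array is passed, the result is the symmetric pairwise matrix with an
# explicitly zeroed diagonal:
#
#     import numpy as np
#     X = np.array([[1., 0.], [0., 1.]])
#     cosine_distances(X)
#     # -> array([[0., 1.],
#     #           [1., 0.]])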
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
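# A minimal sketch of polynomial_kernel; with gamma=1 and coef0=1 the single
# entry below is (<x, y> + 1)**degree = (2 + 1)**2 = 9:
#
#     import numpy as np
#     X = np.array([[1., 1.]])
#     polynomial_kernel(X, X, degree=2, gamma=1., coef0=1.)
#     # -> array([[ 9.]])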
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
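# A minimal sketch of rbf_kernel; by construction the result equals
# np.exp(-gamma * euclidean_distances(X, Y, squared=True)):
#
#     import numpy as np
#     X = np.array([[0., 0.]])
#     Y = np.array([[1., 0.], [0., 2.]])
#     rbf_kernel(X, Y, gamma=0.5)
#     # -> array([[ 0.6065...,  0.1353...]])   (exp(-0.5) and exp(-2))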
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
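# A minimal sketch of cosine_similarity; rows are L2-normalized first, so only
# the directions of the samples matter:
#
#     import numpy as np
#     X = np.array([[1., 0.], [1., 1.]])
#     cosine_similarity(X)
#     # -> array([[ 1.       ,  0.7071...],
#     #           [ 0.7071...,  1.       ]])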
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
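# A minimal sketch relating the two chi-squared kernels; chi2_kernel is the
# exponentiated additive kernel, i.e. np.exp(gamma * additive_chi2_kernel(X, Y)):
#
#     import numpy as np
#     X = np.array([[0.5, 0.5]])    # non-negative "histogram" rows
#     Y = np.array([[1.0, 0.0]])
#     additive_chi2_kernel(X, Y)    # -> array([[-0.6666...]])
#     chi2_kernel(X, Y, gamma=1.)   # -> array([[ 0.5134...]])  (= exp(-0.6666...))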
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
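# A minimal sketch of pairwise_distances with a built-in metric and with a
# user-supplied callable (the callable receives one row of X and one row of Y
# and must return a single number):
#
#     import numpy as np
#     X = np.array([[0., 1.], [2., 3.]])
#     pairwise_distances(X, metric='manhattan')
#     # -> array([[ 0.,  4.],
#     #           [ 4.,  0.]])
#     pairwise_distances(X, metric=lambda u, v: np.abs(u - v).max())
#     # -> array([[ 0.,  2.],
#     #           [ 2.,  0.]])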
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel functions.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel functions, and the callables they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
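# A minimal sketch of pairwise_kernels; with filter_params=True only the
# keyword arguments listed in KERNEL_PARAMS for the chosen metric are
# forwarded, so the extraneous 'degree' below is silently dropped for the
# rbf kernel:
#
#     import numpy as np
#     X = np.array([[0., 0.], [1., 0.]])
#     pairwise_kernels(X, metric='rbf', gamma=0.5)    # same as rbf_kernel(X, gamma=0.5)
#     pairwise_kernels(X, metric='rbf', filter_params=True, gamma=0.5, degree=3)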
|
mit
|
bikash/kaggleCompetition
|
microsoft malware/code/2gram.py
|
1
|
11171
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 01:55:47 2015
@author: marios michailidis
"""
# licence: FreeBSD
"""
Copyright (c) 2015, Marios Michailidis
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import random
import numpy as np
import scipy as spss
from scipy.sparse import csr_matrix
import sys
sys.path.append("../../xgboost/wrapper")
import xgboost as xgb
from sklearn.ensemble import ExtraTreesClassifier
thre=30
num_round=1150
lr=0.05
max_de=7
subsam=0.4
colsample_bytree=0.5
gamma =0.001
min_child_weight=0.05
seed=1
objective='multi:softprob'
param = {}
param['booster']= 'gbtree'#gblinear
param['objective'] = objective
param['bst:eta'] = lr
param['seed']=seed
param['bst:max_depth'] = max_de
param['eval_metric'] = 'auc'
param['bst:min_child_weight']=min_child_weight
param['silent'] = 1
param['nthread'] = thre
param['bst:subsample'] = subsam
param['num_class'] = 9
param['gamma'] = gamma
param['colsample_bytree']=colsample_bytree
def transform2dtos(D2,y2):
# transform a 2d array of predictions to a single array
# we also expand the labels into one-vs-rest binary indicators (1.0 / 0.0)
d1=[]
y1=[]
for i in range (0,len(D2)):
for j in range (0,len(D2[0])):
d1.append(float(D2[i][j]))
if y2[i]==float(j):
y1.append(1.0)
else:
y1.append(0.0)
return d1,y1
""" print predictions in file"""
def printfilewithtarget(X, name):
print("start print the training file with target")
wfile=open(name + ".csv", "w")
for i in range (0, len(X)):
wfile.write(str(X[i][0]) )
for j in range (1, len(X[i])):
wfile.write("," +str(X[i][j]) )
wfile.write("\n")
wfile.close()
print("done")
""" the metric we are being tested on"""
def logloss_metric(p, y):
logloss=0
for i in range (0, len(p)):
for j in range (0,len(p[i])):
if y[i]==float(j):
logloss+= np.log(spss.maximum(spss.minimum(p[i][j],1-(1e-15) ),1e-15 ))
return -logloss/float(len(y))
"""Load a csv file"""
def load(name):
print("start reading file with target")
wfile=open(name , "r")
line=wfile.readline().replace("\n","")
splits=line.split(",")
datalen=len(splits)
wfile.close()
X = np.loadtxt(open( name), delimiter=',',usecols=range(0, datalen), skiprows=0)
print("done")
return np.array(X)
""" use to concatebate the various kfold sets together"""
def cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5, ind1, ind2, ind3, ind4 ,ind5, num):
if num==0:
xwhole=np.concatenate((x2,x3,x4,x5), axis=0)
yhol=np.concatenate((y2,y3,y4,y5), axis=0)
return x1,y1 ,ind1,xwhole,yhol
elif num==1:
xwhole=np.concatenate((x1,x3,x4,x5), axis=0)
yhol=np.concatenate((y1,y3,y4,y5), axis=0)
return x2,y2 ,ind2,xwhole,yhol
elif num==2:
xwhole=np.concatenate((x1,x2,x4,x5), axis=0)
yhol=np.concatenate((y1,y2,y4,y5), axis=0)
return x3,y3 ,ind3,xwhole,yhol
elif num==3:
xwhole=np.concatenate((x1,x2,x3,x5), axis=0)
yhol=np.concatenate((y1,y2,y3,y5), axis=0)
return x4,y4 ,ind4,xwhole,yhol
else :
xwhole=np.concatenate((x1,x2,x3,x4), axis=0)
yhol=np.concatenate((y1,y2,y3,y4), axis=0)
return x5,y5 ,ind5,xwhole,yhol
""" Splits data to 5 kfold sets"""
def split_array_in_5(array, seed):
random.seed(seed)
new_arra1=[]
new_arra2=[]
new_arra3=[]
new_arra4=[]
new_arra5=[]
indiceds1=[]
indiceds2=[]
indiceds3=[]
indiceds4=[]
indiceds5=[]
for j in range (0,len(array)):
rand=random.random()
if rand <0.2:
new_arra1.append(array[j])
indiceds1.append(j)
elif rand <0.4:
new_arra2.append(array[j])
indiceds2.append(j)
elif rand <0.6:
new_arra3.append(array[j])
indiceds3.append(j)
elif rand <0.8:
new_arra4.append(array[j])
indiceds4.append(j)
else :
new_arra5.append(array[j])
indiceds5.append(j)
#convert to numpy
new_arra1=np.array(new_arra1)
new_arra2=np.array(new_arra2)
new_arra3=np.array(new_arra3)
new_arra4=np.array(new_arra4)
new_arra5=np.array(new_arra5)
#return arrays and indices
return new_arra1,new_arra2,new_arra3,new_arra4,new_arra5,indiceds1,indiceds2,indiceds3,indiceds4,indiceds5
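# A small usage sketch, assuming a toy input: the five parts and their index
# lists partition the input, each holding roughly 20% of the rows at random:
#
#   a1, a2, a3, a4, a5, i1, i2, i3, i4, i5 = split_array_in_5(list(range(100)), 1)
#   # len(a1) + len(a2) + len(a3) + len(a4) + len(a5) == 100
#   # sorted(i1 + i2 + i3 + i4 + i5) == list(range(100))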
def scalepreds(prs):
for i in range (0, len(prs)):
suum=0.0
for j in range (0,9):
suum+=prs[i][j]
for j in range (0,9):
prs[i][j]/=suum
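# A small sketch of scalepreds, which rescales each 9-way prediction row in
# place so that it sums to one:
#
#   preds = [[2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0]]
#   scalepreds(preds)
#   # preds -> [[0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5]]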
"""loads first columns of a file"""
def loadfirstcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the first column
pred.append(sp[0])
op.close()
return pred
"""loads last columns of a file"""
def loadlastcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the last columns
pred.append(float(sp[len(sp)-1])-1.0)
op.close()
return pred
""" This is the main method"""
def main():
directory=''
train_file="old2gramtrain.csv"
test_file="old2gramtest.csv"
SEED= 15
outset="2gram"
y= loadlastcolumn(directory+"trainLabels.csv")
ids=loadfirstcolumn(directory+"sampleSubmission.csv")
include_inpretrain=True
trainini_file= ["old1gramtrain.csv"]
testini_file = ["old1gramtest.csv"]
X=load(train_file)
print ("train samples: %d columns: %d " % (len(X) , len(X[0])))
X_test=load(test_file)
print ("train samples: %d columns: %d" % (len(X_test) , len(X_test[0])))
if include_inpretrain:
for t in range(0,len(trainini_file)):
Xini=load(trainini_file[t])
print ("train samples: %d columns: %d " % (len(Xini) , len(Xini[0])))
X_testini=load(testini_file[t])
print ("train samples: %d columns: %d" % (len(X_testini) , len(X_testini[0])))
X=np.column_stack((X,Xini))
X_test=np.column_stack((X_test,X_testini))
print ("train after merge samples: %d columns: %d" % (len(X) , len(X[0])))
print ("train after merge samples: %d columns: %d" % (len(X_test) , len(X_test[0])))
number_of_folds=5 # 5-fold CV used to build the stacked out-of-fold predictions
train_stacker=[ [0.0 for d in range (0,9)] for k in range (0,len(X)) ]
test_stacker=[[0.0 for d in range (0,9)] for k in range (0,len(X_test))]
#label_stacker=[0 for k in range (0,len(X))]
# split the training data into 5 folds
x1,x2,x3,x4,x5,in1,in2,in3,in4,in5=split_array_in_5(X, SEED)
y1,y2,y3,y4,y5,iny1,iny2,iny3,iny4,iny5=split_array_in_5(y, SEED)
# accumulate the average log-loss across the folds
mean_log = 0.0
for i in range(0,number_of_folds):
X_cv,y_cv,indcv,X_train,y_train=cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5,in1, in2, in3, in4 ,in5, i)
print (" train size: %d. test size: %d, cols: %d " % (len(X_train) ,len(X_cv) ,len(X_train[0]) ))
""" model XGBOOST classifier"""
xgmat = xgb.DMatrix( csr_matrix(X_train), label=y_train, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix( csr_matrix(X_cv), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_cv), 9).tolist()
scalepreds(preds)
# compute Loglikelihood metric for this CV fold
loglike = logloss_metric( preds,y_cv)
print "size train: %d size cv: %d Loglikelihood (fold %d/%d): %f" % (len(X_train), len(X_cv), i + 1, number_of_folds, loglike)
mean_log += loglike
#save the results
no=0
for real_index in indcv:
for d in range (0,9):
train_stacker[real_index][d]=(preds[no][d])
no+=1
if (number_of_folds)>0:
mean_log/=number_of_folds
print (" Average M loglikelihood: %f" % (mean_log) )
xgmat = xgb.DMatrix( csr_matrix(X), label=y, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix(csr_matrix(X_test), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_test), 9 ).tolist()
scalepreds(preds)
for pr in range (0,len(preds)):
for d in range (0,9):
test_stacker[pr][d]=preds[pr][d]
# === Predictions === #
print (" printing datasets ")
printfilewithtarget(train_stacker, outset + "train")
printfilewithtarget(test_stacker, outset + "test")
print("Write results...")
output_file = "submission_"+str( (mean_log ))+".csv"
print("Writing submission to %s" % output_file)
f = open(output_file, "w")
f.write("Id")# the header
for b in range (1,10):
f.write("," + str("Prediction" + str(b) ) )
f.write("\n")
for g in range(0, len(test_stacker)) :
f.write("%s" % ((ids[g])))
for prediction in test_stacker[g]:
f.write(",%f" % (prediction))
f.write("\n")
f.close()
print("Done.")
if __name__=="__main__":
main()
|
apache-2.0
|
cauchycui/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
282
|
4059
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
bsd-3-clause
|
fyffyt/pylearn2
|
pylearn2/cross_validation/tests/test_train_cv_extensions.py
|
49
|
1681
|
"""
Tests for TrainCV extensions.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_monitor_based_save_best_cv():
"""Test MonitorBasedSaveBestCV."""
handle, filename = tempfile.mkstemp()
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_monitor_based_save_best_cv %
{'save_path': filename})
trainer.main_loop()
# clean up
os.remove(filename)
test_yaml_monitor_based_save_best_cv = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: sigmoid,
act_dec: linear
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: exhaustive,
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
cv_extensions: [
!obj:pylearn2.cross_validation.train_cv_extensions.MonitorBasedSaveBestCV {
channel_name: train_objective,
save_path: %(save_path)s,
},
],
}
"""
|
bsd-3-clause
|