repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
wavelets/ThinkStats2 | code/thinkplot.py | 1 | 17979 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import logging
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
        # letting the generator end here raises StopIteration for the caller;
        # PEP 479 forbids raising StopIteration explicitly inside a generator
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
if cls.color_iter is None:
cls.InitializeIter(7)
return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
Brewer.InitializeIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (14, 6),
(2, 2): (10, 10),
(2, 3): (10, 14),
(3, 1): (8, 10),
}
if (rows, cols) in size_map:
fig = pyplot.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
pyplot.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(plot_number, rows=None, cols=None):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
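# A minimal illustration of the semantics (values already in d win):
# Underride(dict(color='blue'), color='red', alpha=0.5) returns
# {'color': 'blue', 'alpha': 0.5}.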
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
fig = pyplot.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def UnderrideColor(options):
if 'color' in options:
return options
color_iter = Brewer.GetIter()
if color_iter:
try:
options['color'] = next(color_iter)
except StopIteration:
print('Warning: Brewer ran out of colors.')
Brewer.ClearIter()
return options
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = UnderrideColor(options)
label = getattr(obj, 'label', '')
options = Underride(options, linewidth=3, alpha=0.8, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
    """Fills the region between two lines.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = UnderrideColor(options)
options = Underride(options, linewidth=0, alpha=0.6)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
    """Plots a bar chart.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = UnderrideColor(options)
options = Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
    """Makes a hexbin plot.
xs: x values
ys: y values
    options: options passed to pyplot.hexbin
"""
options = Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
if pdf.label:
options = Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
            logging.warning("Hist: Can't compute bar width automatically. "
                            "Check for non-numeric types in Hist. "
                            "Or try providing width option.")
if hist.label:
options = Underride(options, label=hist.label)
options = Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
    Options are passed along for all histograms. If you want different
    options for each one, make multiple calls to Hist.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
if width < 0.1:
logging.warning("Pmf: width is very small; "
"Pmf may not be visible.")
except TypeError:
            logging.warning("Pmf: Can't compute bar width automatically. "
                            "Check for non-numeric types in Pmf. "
                            "Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
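    # trace the outline of the bars as a single polyline: for each value drop
    # to zero, step up to its probability, run flat for one bar width, and
    # remember where the previous bar ended in (lastx, lasty)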
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
if pmf.label:
options = Underride(options, label=pmf.label)
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
        xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.label:
options = Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
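# The sketch below is not part of the original module; it shows how the scale
# dict returned by Cdf is meant to be forwarded to Show/Config, and it assumes
# the companion thinkstats2 module from Think Stats is importable.
def _demo_cdf_transforms():
    """Plots the complementary CDF of an exponential sample on a log y axis,
    where it should look roughly like a straight line.
    """
    import thinkstats2
    sample = np.random.exponential(scale=1.0, size=1000)
    cdf = thinkstats2.Cdf(sample, label='expo sample')
    scale = Cdf(cdf, transform='exponential')
    Show(**scale)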
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
    obj: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
    xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = Underride(options, verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
# looks like this is not necessary: matplotlib understands text loc specs
loc_dict = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
loc = options.get('loc', 0)
#loc = loc_dict.get(loc, loc)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
pyplot.show()
Clf()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| gpl-3.0 |
ahara/kaggle_otto | otto/model/model_15_nn_adagrad_pca/nn_adagrad_pca.py | 1 | 11704 | """
Mean log loss from 5-fold CV: 0.478792791749
"""
import copy
import itertools
import numpy as np
import lasagne
import math
import os
import theano
import theano.tensor as T
import time
from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params
from lasagne.nonlinearities import rectify, softmax
from lasagne.objectives import categorical_crossentropy, Objective
from lasagne.updates import adagrad
from sklearn import decomposition, feature_extraction
from sklearn.base import BaseEstimator
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.utils import check_random_state
from otto_utils import consts, utils
MODEL_NAME = 'model_15_nn_adagrad_pca'
MODE = 'cv' # cv|submission|holdout|tune
class NeuralNetwork(BaseEstimator):
def __init__(self, n_hidden=20, max_epochs=150, batch_size=200,
lr=0.01, epsilon=0.9, dropout=0.5, valid_ratio=0.0,
use_valid=False, verbose=0, random_state=None):
self.n_hidden = n_hidden
self.max_epochs = max_epochs
self.batch_size = batch_size
self.lr = lr
self.epsilon = epsilon
self.dropout = dropout
self.valid_ratio = valid_ratio
self.use_valid = use_valid
self.verbose = verbose
self.random_state = random_state
# State
self.score_ = None
self.classes_ = None
self.n_classes_ = None
self.model = None
def fit(self, data, targets, sample_weight=None):
self.classes_, indices = np.unique(targets, return_inverse=True)
self.n_classes_ = self.classes_.shape[0]
random_state = check_random_state(self.random_state)
# Shuffle data and eventually split on train and validation sets
if self.valid_ratio > 0:
strat_shuffled_split = StratifiedShuffleSplit(targets, test_size=self.valid_ratio,
n_iter=1, random_state=self.random_state)
train_index, valid_index = [s for s in strat_shuffled_split][0]
X_train, y_train = data[train_index], targets[train_index]
X_valid, y_valid = data[valid_index], targets[valid_index]
else:
X_train, y_train = data, targets
X_valid, y_valid = np.array([]), np.array([])
if self.verbose > 5:
print 'X_train: %s, y_train: %s' % (X_train.shape, y_train.shape)
if self.use_valid:
print 'X_valid: %s, y_valid: %s' % (X_valid.shape, y_valid.shape)
# Prepare theano variables
dataset = dict(
X_train=theano.shared(lasagne.utils.floatX(X_train)),
y_train=T.cast(theano.shared(y_train), 'int32'),
X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
y_valid=T.cast(theano.shared(y_valid), 'int32'),
num_examples_train=X_train.shape[0],
num_examples_valid=X_valid.shape[0],
input_dim=X_train.shape[1],
output_dim=self.n_classes_,
)
if self.verbose > 0:
print "Building model and compiling functions..."
output_layer = self.build_model(dataset['input_dim'])
iter_funcs = self.create_iter_functions(dataset, output_layer)
if self.verbose > 0:
print "Starting training..."
now = time.time()
results = []
try:
for epoch in self.train(iter_funcs, dataset, output_layer):
if self.verbose > 1:
print "Epoch {} of {} took {:.3f}s".format(
epoch['number'], self.max_epochs, time.time() - now)
now = time.time()
results.append([epoch['number'], epoch['train_loss'], epoch['valid_loss']])
if self.verbose > 1:
print " training loss:\t\t{:.6f}".format(epoch['train_loss'])
print " validation loss:\t\t{:.6f}".format(epoch['valid_loss'])
print " validation accuracy:\t\t{:.2f} %%".format(
epoch['valid_accuracy'] * 100)
if epoch['number'] >= self.max_epochs:
break
if self.verbose > 0:
print 'Minimum validation error: %f (epoch %d)' % \
(epoch['best_val_error'], epoch['best_val_iter'])
except KeyboardInterrupt:
pass
return self
def predict(self, data):
preds, _ = self.make_predictions(data)
return preds
def predict_proba(self, data):
_, proba = self.make_predictions(data)
return proba
def score(self):
return self.score_
# Private methods
def build_model(self, input_dim):
l_in = InputLayer(shape=(self.batch_size, input_dim))
l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
return l_out
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
y_batch = T.ivector('y')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
objective = Objective(output_layer, loss_function=categorical_crossentropy)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
all_params = get_all_params(output_layer)
updates = adagrad(loss_train, all_params, self.lr, self.epsilon)
iter_train = theano.function(
[batch_index], loss_train,
updates=updates,
givens={
X_batch: dataset['X_train'][batch_slice],
y_batch: dataset['y_train'][batch_slice],
},
on_unused_input='ignore',
)
iter_valid = None
if self.use_valid:
iter_valid = theano.function(
[batch_index], [loss_eval, accuracy, proba],
givens={
X_batch: dataset['X_valid'][batch_slice],
y_batch: dataset['y_valid'][batch_slice],
},
)
return dict(train=iter_train, valid=iter_valid)
def create_test_function(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
iter_test = theano.function(
[batch_index], [pred, proba],
givens={
X_batch: dataset['X_test'][batch_slice],
},
)
return dict(test=iter_test)
def train(self, iter_funcs, dataset, output_layer):
num_batches_train = dataset['num_examples_train'] // self.batch_size
num_batches_valid = int(math.ceil(dataset['num_examples_valid'] / float(self.batch_size)))
best_val_err = 100
best_val_iter = -1
for epoch in itertools.count(1):
batch_train_losses = []
for b in range(num_batches_train):
batch_train_loss = iter_funcs['train'](b)
batch_train_losses.append(batch_train_loss)
avg_train_loss = np.mean(batch_train_losses)
batch_valid_losses = []
batch_valid_accuracies = []
batch_valid_probas = []
if self.use_valid:
for b in range(num_batches_valid):
batch_valid_loss, batch_valid_accuracy, batch_valid_proba = iter_funcs['valid'](b)
batch_valid_losses.append(batch_valid_loss)
batch_valid_accuracies.append(batch_valid_accuracy)
batch_valid_probas.append(batch_valid_proba)
avg_valid_loss = np.mean(batch_valid_losses)
avg_valid_accuracy = np.mean(batch_valid_accuracies)
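            # keep a snapshot of the best model: lowest validation loss when a
            # validation split is used, otherwise simply the final epoch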
if (best_val_err > avg_valid_loss and self.use_valid) or\
(epoch == self.max_epochs and not self.use_valid):
best_val_err = avg_valid_loss
best_val_iter = epoch
# Save model
self.score_ = best_val_err
self.model = copy.deepcopy(output_layer)
yield {
'number': epoch,
'train_loss': avg_train_loss,
'valid_loss': avg_valid_loss,
'valid_accuracy': avg_valid_accuracy,
'best_val_error': best_val_err,
'best_val_iter': best_val_iter,
}
def make_predictions(self, data):
dataset = dict(
X_test=theano.shared(lasagne.utils.floatX(data)),
num_examples_test=data.shape[0],
input_dim=data.shape[1],
output_dim=self.n_classes_,
)
iter_funcs = self.create_test_function(dataset, self.model)
num_batches_test = int(math.ceil(dataset['num_examples_test'] / float(self.batch_size)))
test_preds, test_probas = np.array([]), None
for b in range(num_batches_test):
batch_test_pred, batch_test_proba = iter_funcs['test'](b)
test_preds = np.append(test_preds, batch_test_pred)
test_probas = np.append(test_probas, batch_test_proba, axis=0) if test_probas is not None else batch_test_proba
return test_preds, test_probas
if __name__ == '__main__':
train, labels, test, _, _ = utils.load_data()
# Preprocess data - transform counts to TFIDF features
tfidf = feature_extraction.text.TfidfTransformer(smooth_idf=False)
train = np.append(train, tfidf.fit_transform(train).toarray(), axis=1)
test = np.append(test, tfidf.transform(test).toarray(), axis=1)
# PCA
pp = decomposition.PCA()
train = pp.fit_transform(train)
test = pp.transform(test)
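    # The positional arguments below map onto NeuralNetwork.__init__ as
    # n_hidden=1024, max_epochs=110, batch_size=150, lr~=0.0011,
    # epsilon~=5e-15, dropout=0.1, valid_ratio=0.02, use_valid=True, verbose=10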
clf = NeuralNetwork(1024, 110, 150, 0.0010954104605473447, 5.003481345255732e-15, 0.1,
.02, True, 10, random_state=18)
if MODE == 'cv':
scores, predictions = utils.make_blender_cv(clf, train, labels, calibrate=False)
print 'CV:', scores, 'Mean log loss:', np.mean(scores)
utils.write_blender_data(consts.BLEND_PATH, MODEL_NAME + '.csv', predictions)
elif MODE == 'submission':
clf.fit(train, labels)
predictions = clf.predict_proba(test)
utils.save_submission(consts.DATA_SAMPLE_SUBMISSION_PATH,
os.path.join(consts.ENSEMBLE_PATH, MODEL_NAME + '.csv'),
predictions)
elif MODE == 'holdout':
score = utils.hold_out_evaluation(clf, train, labels, calibrate=False)
print 'Log loss:', score
else:
print 'Unknown mode' | bsd-3-clause |
jm-begon/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
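# Background: OLS minimizes ||y - Xw||^2, while Ridge minimizes
# ||y - Xw||^2 + alpha * ||w||^2. The alpha * ||w||^2 penalty shrinks the
# coefficients, which is what stabilizes the fitted slopes across the noisy
# resamples plotted below (here alpha=.1).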
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
WuShichao/computational-physics | 3/3_19/3_19.py | 1 | 1974 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 17:00:09 2016
Calculate Poincare sections for a frequency which has nothing to do
with those intrinsic to the system, using Euler-Cromer method.
@author: nightwing
"""
from tqdm import tqdm
from math import sin,pi
import matplotlib.pyplot as plt
g = 9.8 #gravity acceleration (m/s2)
length = 9.8 #length of the rod (m)
k = g / length #g/length
dt = 0.001 #time step (s)
t_end = 6000 #end time (s)
situations = [] #this list store [time, angle]
#caculate the physical pendulum
def PHYSICAL_PENDULUM(q,fd,freq,theta,snap_freq):
t = 0 #initial time (s)
angular_vel = 0 #initial angular velocity (rad/s)
angular_velocity = [] #this list store value of angular velocity
angle = [] #this list store value of angle
time = [] #this list store value of time
while t <= t_end:
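        # record a Poincare-section point once per period of the snapshot
        # frequency, i.e. whenever t is (nearly) an integer multiple of
        # 2*pi/snap_freq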
if abs(round(t/(2*pi/snap_freq)) - (t/(2*pi/snap_freq))) < 0.001:
angular_velocity.append(angular_vel)
angle.append(theta)
time.append(t)
angular_vel += (-k*sin(theta)-q*angular_vel+fd*sin(freq*t)) * dt
theta += angular_vel * dt
if theta > pi:
theta -= 2*pi
elif theta < -pi:
theta += 2*pi
t += dt
return [angle,angular_velocity]
#-------------caculate (Euler-Cromer method)------------
for F in tqdm([0.5, 1.2]):
situations.append(PHYSICAL_PENDULUM(0.5, F, 2/3.0, 0.2, 3/4.0))
#----------------graph--------------
plt.subplot(121)
plt.title("$\omega$ versus $\\theta$ Fd=0.5")
plt.xlabel("$\\theta$ (radians)")
plt.ylabel("$\omega$ (rad/s)")
plt.scatter(situations[0][0],situations[0][1],s=1)
plt.ylim(-1,1)
plt.subplot(122)
plt.title("$\omega$ versus $\\theta$ Fd=1.2")
plt.xlabel("$\\theta$ (radians)")
plt.ylabel("$\omega$ (rad/s)")
plt.scatter(situations[1][0],situations[1][1],s=1)
plt.show() | gpl-3.0 |
wdurhamh/statsmodels | statsmodels/graphics/tests/test_regressionplots.py | 20 | 9978 | import numpy as np
import statsmodels.api as sm
from numpy.testing import dec
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid, add_lowess,
plot_added_variable, plot_partial_residuals,
plot_ceres_residuals)
from pandas import Series, DataFrame
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_regressionplots.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def teardown_module():
plt.close('all')
if pdf_output:
pdf.close()
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
@dec.skipif(not have_matplotlib)
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_plot_oth(self):
#just test that they run
res = self.res
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
fig = plot_ccpr_grid(res, exog_idx=[0,1])
for ax in fig.axes:
add_lowess(ax)
close_or_save(pdf, fig)
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = Series(y, name="outcome")
res = sm.OLS(y, exog0).fit()
self.res = res
data = DataFrame(exog0, columns=["const", "var1", "var2"])
data['y'] = y
self.data = data
class TestPlotFormula(TestPlotPandas):
@dec.skipif(not have_matplotlib)
def test_one_column_exog(self):
from statsmodels.formula.api import ols
res = ols("y~var1-1", data=self.data).fit()
plot_regress_exog(res, "var1")
res = ols("y~var1", data=self.data).fit()
plot_regress_exog(res, "var1")
class TestABLine(object):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
mod = sm.OLS(y,X).fit()
cls.X = X
cls.y = y
cls.mod = mod
@dec.skipif(not have_matplotlib)
def test_abline_model(self):
fig = abline_plot(model_results=self.mod)
ax = fig.axes[0]
ax.scatter(self.X[:,1], self.y)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_model_ax(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(model_results=self.mod, ax=ax)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab(self):
mod = self.mod
intercept, slope = mod.params
fig = abline_plot(intercept=intercept, slope=slope)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab_ax(self):
mod = self.mod
intercept, slope = mod.params
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(intercept=intercept, slope=slope, ax=ax)
close_or_save(pdf, fig)
class TestABLinePandas(TestABLine):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
cls.X = X
cls.y = y
X = DataFrame(X, columns=["const", "someX"])
y = Series(y, name="outcome")
mod = sm.OLS(y,X).fit()
cls.mod = mod
class TestAddedVariablePlot(object):
@dec.skipif(not have_matplotlib)
def test_added_variable_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
lin_pred = 4 + exog[:, 0] + 0.2*exog[:, 1]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 0, 1, 2:
for use_glm_weights in False, True:
for resid_type in "resid_deviance", "resid_response":
weight_str = ["Unweighted", "Weighted"][use_glm_weights]
# Run directly and called as a results method.
for j in 0,1:
if j == 0:
fig = plot_added_variable(results, focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot"
else:
fig = results.plot_added_variable(focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot (called as method)"
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.7])
effect_str = ["Linear effect, slope=1",
"Quadratic effect", "No effect"][focus_col]
ti += "\nPoisson regression\n"
ti += effect_str + "\n"
ti += weight_str + "\n"
ti += "Using '%s' residuals" % resid_type
ax.set_title(ti)
close_or_save(pdf, fig)
class TestPartialResidualPlot(object):
@dec.skipif(not have_matplotlib)
def test_partial_residual_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0,1:
if j == 0:
fig = plot_partial_residuals(results, focus_col)
else:
fig = results.plot_partial_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "Partial residual plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
class TestCERESPlot(object):
@dec.skipif(not have_matplotlib)
def test_ceres_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0, 1:
if j == 0:
fig = plot_ceres_residuals(results, focus_col)
else:
fig = results.plot_ceres_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "CERES plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
| bsd-3-clause |
mendax-grip/cfdemUtilities | mixing/pca/old/unitTestPca.py | 2 | 3003 | #--------------------------------------------------------------------------------------------------
#
# Description : Sample program to generate random trajectories and to analyse them using PCA
# This is a unit test function for the FSAPLF CODE
#
# Usage : python unitTestPca
#
#
# Author : Bruno Blais
#
#
#
#--------------------------------------------------------------------------------------------------
# Imports
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
plot = True
vScaleX=0.15
vScaleY=0.15
# Calculation of reduced deviation
def reDev(x):
y = 1./numpy.std(x,ddof=1) * (x-numpy.mean(x))
return y
def writeFile(i,x,y,z):
if (i<10):
outname=sys.argv[1]+"_00"+str(i)+".dump"
elif(i<100):
outname=sys.argv[1]+"_0"+str(i)+".dump"
else:
outname=sys.argv[1]+"_"+str(i)+".dump"
print "Writing the file : ", outname
outfile=open(outname,'w')
outfile.write("ITEM: TIMESTEP\n")
outfile.write("%i\n" %i);
outfile.write("ITEM: NUMBER OF ATOMS\n")
outfile.write("%i\n" %numpy.size(x));
outfile.write("ITEM: BOX BOUNDS ff ff ff\n-0.15 0.15\n-0.15 0.15\n-5e-06 0.300005\n")
outfile.write("ITEM: ATOMS id type type x y z vx vy vz fx fy fz radius\n")
x2=numpy.reshape(x,numpy.size(x))
y2=numpy.reshape(y,numpy.size(x))
z2=numpy.reshape(z,numpy.size(x))
for i in range(0,numpy.size(x)):
outfile.write("%i 1 1 %f %f %f 1 1 1 1 1 1 1\n" %(i,x2[i],y2[i],z2[i]))
nx, ny = (20, 20)
x = numpy.linspace(0.001, 1, nx)
y = numpy.linspace(0.001, 1, ny)
xv, yv = numpy.meshgrid(x, y)
#fig=plt.figure("Trajectories")
lFig=plt.figure("lambda")
lAx=lFig.add_subplot(111)
lAx.set_ylabel("Mixing index")
lAx.set_xlabel("Sampling time")
#ax = Axes3D(fig)
xvl=xv
yvl=yv
zvl=xv
zv= xv
lamL=[]
C=numpy.zeros([3,3])
for t in range(0,1000):
# Uniaxial flow ---> (u,v,w) = (0, 0,1)
xvl = xvl
yvl = yvl
zvl = zvl
if (t>100 and t<800):
u = vScaleX * (numpy.random.random_sample([ny,nx])-0.5)
v = vScaleY * (numpy.random.random_sample([ny,nx])-0.5)
xvl = xvl + u
yvl = yvl + v
zvl = zvl+ xvl/xvl*0.1 * (numpy.random.random_sample([ny,nx])-0.5)
#ax.scatter(xvl,yvl,zvl,'o')
#Construct correlation matrix
C[0,0]=numpy.mean(reDev(xvl)*reDev(xv) )
C[1,0]=numpy.mean(reDev(yvl)*reDev(xv))
C[2,0]=numpy.mean(reDev(zvl)*reDev(xv))
C[0,1]=numpy.mean(reDev(xvl)*reDev(yv))
C[1,1]=numpy.mean(reDev(yvl)*reDev(yv))
C[2,1]=numpy.mean(reDev(zvl)*reDev(yv))
C[0,2]=numpy.mean(reDev(xvl)*reDev(zv))
C[1,2]=numpy.mean(reDev(yvl)*reDev(zv))
C[2,2]=numpy.mean(reDev(zvl)*reDev(zv))
M = numpy.dot(C,C.transpose())
lam,R=numpy.linalg.eig(M)
lAx.scatter(t,numpy.sqrt(numpy.max(lam)/3.))
writeFile(t,xvl,yvl,zvl)
lamL.extend([lam])
for i in lamL:
print numpy.sort(i), " \n"
plt.show()
| lgpl-3.0 |
djgagne/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
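        # e.g. the default slice_=(slice(70, 195), slice(78, 172)) with
        # resize=0.5 used by the fetchers below yields h, w = 62, 47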
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
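# Illustrative usage (keyword values here are arbitrary examples, not defaults):
#
#     lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#     X, y = lfw_people.data, lfw_people.target
#     names = lfw_people.target_names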
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
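    # each metadata line holds either (name, idx1, idx2) for a matching pair
    # or (name1, idx1, name2, idx2) for a non-matching pair; indices in the
    # file are 1-based, hence the "- 1" below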
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
SANDAG/urbansim | urbansim/models/tests/test_supplydemand.py | 9 | 4116 | from __future__ import division
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import supplydemand as supdem
@pytest.fixture
def choosers():
return pd.DataFrame(
{'var1': range(5, 10),
'thing_id': ['a', 'c', 'e', 'g', 'i']})
@pytest.fixture
def alternatives():
return pd.DataFrame(
{'var2': range(10, 20),
'var3': range(20, 30),
'price_col': [1] * 10,
'zone_id': ['w', 'x', 'y', 'z', 'z', 'x', 'y', 'w', 'y', 'y']},
index=pd.Index([x for x in 'abcdefghij'], name='thing_id'))
@pytest.fixture(scope='module')
def alt_segmenter():
return 'zone_id'
class _TestLCM(object):
def apply_predict_filters(self, choosers, alternatives):
choosers = choosers.query('var1 != 7')
alternatives = alternatives.query('var2 != 14')
return choosers, alternatives
def summed_probabilities(self, choosers, alternatives):
return pd.Series(
[1, 0.25, 1, 2, 0.75, 2, 1, 1.5, 0.5],
index=['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'j'])
@pytest.fixture(scope='module')
def lcm():
return _TestLCM()
@pytest.fixture
def filtered(lcm, choosers, alternatives):
return lcm.apply_predict_filters(choosers, alternatives)
@pytest.fixture(scope='module')
def wxyz():
w = 1
x = 0.5
y = 1.25
z = 2
return w, x, y, z
def test_calculate_adjustment_clips(lcm, filtered, alt_segmenter):
clip = 1
choosers, alternatives = filtered
alts_multiplier, submarkets_multiplier, finished = \
supdem._calculate_adjustment(
lcm, choosers, alternatives, alternatives[alt_segmenter],
clip, clip)
pdt.assert_series_equal(
alts_multiplier, pd.Series([1] * 9, index=alternatives.index),
check_dtype=False)
pdt.assert_series_equal(
submarkets_multiplier, pd.Series([1] * 4, index=['w', 'x', 'y', 'z']),
check_dtype=False)
def test_calculate_adjustment(lcm, filtered, alt_segmenter, wxyz):
clip_low = 0
clip_high = 2
choosers, alternatives = filtered
alts_multiplier, submarkets_multiplier, finished = \
supdem._calculate_adjustment(
lcm, choosers, alternatives, alternatives[alt_segmenter],
clip_low, clip_high)
w, x, y, z = wxyz
pdt.assert_series_equal(
alts_multiplier,
pd.Series([w, x, y, z, x, y, w, y, y],
index=alternatives.index))
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']))
def test_supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, filtered, wxyz):
clip_low = 0
clip_high = 2
price_col = 'price_col'
w, x, y, z = wxyz
filtered_choosers, filtered_alts = filtered
new_price, submarkets_multiplier = supdem.supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
clip_change_low=clip_low, clip_change_high=clip_high)
pdt.assert_series_equal(
new_price,
pd.Series(
[w, x, y, z, x, y, w, y, y],
index=filtered_alts.index, name='price_col') ** 5)
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']) ** 5)
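    # Note: the ** 5 above (and ** 6 in the next test) appear to reflect
    # supply_and_demand's default of five iterations -- each iteration
    # contributes one clipped adjustment factor, and the second test also
    # folds in the supplied base_multiplier.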
def test_supply_and_demand_base_ratio(
lcm, choosers, alternatives, alt_segmenter, filtered, wxyz):
clip_low = 0
clip_high = 2
price_col = 'price_col'
w, x, y, z = wxyz
filtered_choosers, filtered_alts = filtered
base_multiplier = pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z'])
new_price, submarkets_multiplier = supdem.supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier, clip_low, clip_high)
pdt.assert_series_equal(
new_price,
pd.Series(
[w, x, y, z, x, y, w, y, y],
index=filtered_alts.index, name='price_col') ** 6)
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']) ** 6)
| bsd-3-clause |
cainiaocome/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
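# A tiny, self-contained sketch of the two solvers timed in this benchmark
# (the problem size and sparsity level here are illustrative only and much
# smaller than the grids used below).
def _tiny_omp_vs_lars_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 30)
    y = X[:, :3].sum(axis=1)
    # LARS path limited to 3 steps vs. OMP constrained to 3 nonzero coefs
    alphas, active, coef_path = lars_path(X, y, max_iter=3)
    coef_omp = orthogonal_mp(X, y, n_nonzero_coefs=3)
    return coef_path[:, -1], coef_omp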
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
    for i, (label, timings) in enumerate(sorted(results.items())):
        ax = fig.add_subplot(1, 2, i + 1)
        vmax = max(1 - timings.min(), -1 + timings.max())
        pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + [str(each) for each in samples_range])
        ax.set_yticklabels([''] + [str(each) for each in features_range])
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
profxj/xastropy | xastropy/xguis/spec_widgets.py | 3 | 32656 | """
#;+
#; NAME:
#; spec_widgets
#; Version 1.0
#;
#; PURPOSE:
#; Module for Spectroscopy widgets with QT
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys, imp
import matplotlib.pyplot as plt
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy import constants as const
from astropy import units as u
from astropy.units import Quantity
u.def_unit(['mAA', 'milliAngstrom'], 0.001 * u.AA, namespace=globals()) # mA
from linetools.spectra import io as lsi
from linetools.spectralline import AbsLine
from linetools.lists.linelist import LineList
from linetools.guis import utils as ltgu
from linetools import utils as ltu
from linetools.isgm.abssystem import GenericAbsSystem
from xastropy import stats as xstats
from xastropy.xutils import xdebug as xdb
from xastropy.plotting import utils as xputils
from xastropy.igm.abs_sys import abssys_utils as xiaa
from pyigm.abssys.lls import LLSSystem
from xastropy.xguis import utils as xguiu
xa_path = imp.find_module('xastropy')[1]
# class ExamineSpecWidget
# class PlotLinesWidget
# class SelectLineWidget
# class SelectedLinesWidget
# class AbsSysWidget
# class VelPlotWidget
# class AODMWidget
# #####
# #####
class AbsSysWidget(QtGui.QWidget):
''' Widget to organize AbsSys along a given sightline
Parameters:
-----------
abssys_list: List
String list of abssys files
16-Dec-2014 by JXP
'''
def __init__(self, abssys_list, parent=None,
only_one=False, linelist=None, no_buttons=False):
'''
only_one: bool, optional
Restrict to one selection at a time? [False]
no_buttons: bool, optional
Eliminate Refine/Reload buttons?
'''
super(AbsSysWidget, self).__init__(parent)
#if not status is None:
# self.statusBar = status
self.abssys_list = abssys_list
# Speeds things up
if linelist is None:
self.linelist = LineList('ISM')
else:
self.linelist = linelist
# Create the line list
list_label = QtGui.QLabel('Abs Systems:')
self.abslist_widget = QtGui.QListWidget(self)
if not only_one:
self.abslist_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.abslist_widget.addItem('None')
#self.abslist_widget.addItem('Test')
# Lists
self.abs_sys = []
self.items = []
self.all_items = []
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLSSystem.from_absid_fil(abssys_fil,
linelist=self.linelist))
self.add_item(abssys_fil)
self.abslist_widget.setCurrentRow(0)
self.abslist_widget.itemSelectionChanged.connect(self.on_list_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(list_label)
# Buttons
if not no_buttons:
buttons = QtGui.QWidget()
self.refine_button = QtGui.QPushButton('Refine', self)
#self.refine_button.clicked.connect(self.refine) # CONNECTS TO A PARENT
reload_btn = QtGui.QPushButton('Reload', self)
reload_btn.clicked.connect(self.reload)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.refine_button)
hbox1.addWidget(reload_btn)
buttons.setLayout(hbox1)
vbox.addWidget(buttons)
vbox.addWidget(self.abslist_widget)
self.setLayout(vbox)
# ##
def on_list_change(self):
items = self.abslist_widget.selectedItems()
# Empty the list
#self.abs_sys = []
if len(self.abs_sys) > 0:
for ii in range(len(self.abs_sys)-1,-1,-1):
self.abs_sys.pop(ii)
# Load up abs_sys (as need be)
new_items = []
for item in items:
txt = item.text()
# Dummy
if txt == 'None':
continue
print('Including {:s} in the list'.format(txt))
# Using LLS for now. Might change to generic
new_items.append(txt)
ii = self.all_items.index(txt)
self.abs_sys.append(self.all_abssys[ii])
# Pass back
self.items = new_items
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
def add_fil(self,abssys_fil):
self.abssys_list.append( abssys_fil )
self.add_item(abssys_fil)
def add_item(self,abssys_fil):
ipos0 = abssys_fil.rfind('/') + 1
ipos1 = abssys_fil.rfind('.fits')
if ipos1 == -1:
ipos1 = len(abssys_fil)
#
self.all_items.append( abssys_fil[ipos0:ipos1] )
self.abslist_widget.addItem(abssys_fil[ipos0:ipos1] )
def remove_item(self,idx):
# Delete
del self.all_items[idx]
del self.all_abssys[idx]
tmp = self.abslist_widget.takeItem(idx+1) # 1 for None
self.on_list_change()
def reload(self):
print('AbsSysWidget: Reloading systems..')
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLSSystem.from_absid_fil(abssys_fil,
linelist=self.linelist))
#self.add_item(abssys_fil)
self.on_list_change()
# ######################
class VelPlotWidget(QtGui.QWidget):
''' Widget for a velocity plot with interaction.
19-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.]*u.km/u.s, abs_sys=None):
'''
spec = Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
'''
super(VelPlotWidget, self).__init__(parent)
# Initialize
spec, spec_fil = ltgu.read_spec(ispec)
self.spec = spec
self.spec_fil = spec_fil
self.z = z
self.vmnx = vmnx
self.norm = norm
# Abs_System
self.abs_sys = abs_sys
if self.abs_sys is None:
self.abs_sys = GenericAbsSystem((0.*u.deg,0.*u.deg), self.z, self.vmnx)
self.abs_lines = []
else:
self.z = self.abs_sys.zabs
# Line list
if llist is None:
self.abs_lines = self.abs_sys.list_of_abslines()
if len(self.abs_lines)>0:
lwrest = [iline.wrest for iline in self.abs_lines]
else:
lwrest = None
if lwrest is not None:
llist = ltgu.set_llist(lwrest) # Not sure this is working..
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.psdict = {} # Dict for spectra plotting
self.psdict['x_minmax'] = self.vmnx.value # Too much pain to use units with this
self.psdict['y_minmax'] = [-0.1, 1.1]
self.psdict['nav'] = ltgu.navigate(0,0,init=True)
# Status Bar?
#if not status is None:
# self.statusBar = status
# Line List
if llist is None:
self.llist = ltgu.set_llist('Strong')
else:
self.llist = llist
self.llist['z'] = self.z
# Indexing for line plotting
self.idx_line = 0
self.init_lines()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Sub_plots
self.sub_xy = [3,4]
self.fig.subplots_adjust(hspace=0.0, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Load them up for display
def init_lines(self):
wvmin = np.min(self.spec.wavelength)
wvmax = np.max(self.spec.wavelength)
#
wrest = self.llist[self.llist['List']].wrest
wvobs = (1+self.z) * wrest
gdlin = np.where( (wvobs > wvmin) & (wvobs < wvmax) )[0]
self.llist['show_line'] = gdlin
# Update/generate lines [will not update]
for idx in gdlin:
self.generate_line((self.z,wrest[idx]))
def grab_line(self, wrest):
""" Grab a line from the list
Parameters
----------
wrest
Returns
-------
iline : AbsLine object
"""
awrest = [iline.wrest for iline in self.abs_lines]
try:
idx = awrest.index(wrest)
except ValueError:
return None
else:
return self.abs_lines[idx]
def generate_line(self, inp):
''' Generate a new line, if it doesn't exist
Parameters:
----------
inp: tuple
(z,wrest)
'''
# Generate?
if self.grab_line(inp[1]) is None:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
newline = AbsLine(inp[1],linelist=self.llist[self.llist['List']])
print('VelPlot: Generating line {:g}'.format(inp[1]))
newline.analy['vlim'] = self.vmnx/2.
newline.attrib['z'] = self.abs_sys.zabs
newline.analy['do_analysis'] = 1 # Init to ok
# Spec file
if self.spec_fil is not None:
newline.analy['datafile'] = self.spec_fil
# Append
self.abs_lines.append(newline)
def remove_line(self, wrest):
""" Remove a line, if it exists
Parameters
----------
wrest : Quantity
"""
awrest = [iline.wrest for iline in self.abs_lines]
try:
idx = awrest.index(wrest)
except ValueError:
return None
else:
_ = self.abs_lines.pop(idx)
# Key stroke
def on_key(self,event):
# Init
rescale = True
fig_clear = False
wrest = None
flg = 0
sv_idx = self.idx_line
## Change rows/columns
if event.key == 'k':
self.sub_xy[0] = max(0, self.sub_xy[0]-1)
if event.key == 'K':
self.sub_xy[0] = self.sub_xy[0]+1
if event.key == 'c':
self.sub_xy[1] = max(0, self.sub_xy[1]-1)
if event.key == 'C':
self.sub_xy[1] = max(0, self.sub_xy[1]+1)
## NAVIGATING
if event.key in self.psdict['nav']:
flg = ltgu.navigate(self.psdict,event)
if event.key == '-':
self.idx_line = max(0, self.idx_line-self.sub_xy[0]*self.sub_xy[1]) # Min=0
if self.idx_line == sv_idx:
print('Edge of list')
if event.key == '=':
self.idx_line = min(len(self.llist['show_line'])-self.sub_xy[0]*self.sub_xy[1],
self.idx_line + self.sub_xy[0]*self.sub_xy[1])
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if self.idx_line == sv_idx:
print('Edge of list')
## Reset z
if event.key == 'z':
newz = ltu.z_from_v(self.z, event.xdata)
self.z = newz
self.abs_sys.zabs = newz
# Drawing
self.psdict['x_minmax'] = self.vmnx.value
# Single line command
if event.key in ['1','2','B','U','L','N','V','A', 'x', 'X',
'^', '&']:
try:
wrest = event.inaxes.get_gid()
except AttributeError:
return
else:
absline = self.grab_line(wrest)
kwrest = wrest.value
## Velocity limits
unit = u.km/u.s
if event.key == '1':
absline.analy['vlim'][0] = event.xdata*unit
if event.key == '2':
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
absline.analy['vlim'][1] = event.xdata*unit
if event.key == '!':
for iline in self.abs_sys.lines:
iline.analy['vlim'][0] = event.xdata*unit
if event.key == '@':
for iline in self.abs_sys.lines:
iline.analy['vlim'][1] = event.xdata*unit
## Line type
if event.key == 'A': # Add to lines
self.generate_line((self.z,wrest))
if event.key == 'x': # Remove line
if self.remove_line(wrest):
print('VelPlot: Removed line {:g}'.format(wrest))
if event.key == 'X': # Remove all lines
# Double check
gui = xguiu.WarningWidg('About to remove all lines. \n Continue??')
gui.exec_()
if gui.ans is False:
return
#
self.abs_lines = [] # Flush??
# Kinematics
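            # flag_kin is treated as a bit mask: bit 0 marks a Low-Ion component
            # and bit 1 a High-Ion component.  The (-1)**(...) expressions below
            # toggle the corresponding bit: they add 2**n when the bit is clear
            # and subtract it when the bit is already set.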
if event.key == '^': # Low-Ion
try:
fkin = absline.analy['flag_kin']
except KeyError:
fkin = 0
fkin += (-1)**(fkin % 2**1 >= 2**0) * 2**0
absline.analy['flag_kin'] = fkin
if event.key == '&': # High-Ion
try:
fkin = absline.analy['flag_kin']
except KeyError:
fkin = 0
fkin += (-1)**(fkin % 2**2 >= 2**1) * 2**1
absline.analy['flag_kin'] = fkin
# Toggle blend
if event.key == 'B':
try:
feye = absline.analy['flg_eye']
except KeyError:
feye = 0
feye = (feye + 1) % 2
absline.analy['flg_eye'] = feye
# Toggle NG
if event.key == 'N':
try:
fanly = absline.analy['do_analysis']
except KeyError:
fanly = 1
if fanly == 0:
fanly = 1
else:
fanly = 0
absline.analy['do_analysis'] = fanly
if event.key == 'V': # Normal
absline.analy['flg_limit'] = 1
if event.key == 'L': # Lower limit
absline.analy['flg_limit'] = 2
if event.key == 'U': # Upper limit
absline.analy['flg_limit'] = 3
# AODM plot
if event.key == ':': #
# Grab good lines
from xastropy.xguis import spec_guis as xsgui
gdl = [iline.wrest for iline in self.abs_sys.lines
if iline.analy['do_analysis'] > 0]
# Launch AODM
if len(gdl) > 0:
gui = xsgui.XAODMGui(self.spec, self.z, gdl, vmnx=self.vmnx, norm=self.norm)
gui.exec_()
else:
print('VelPlot.AODM: No good lines to plot')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if not wrest is None: # Single window
flg = 3
if event.key in ['c','C','k','K','W','!', '@', '=', '-', 'X', 'z','R']: # Redraw all
flg = 1
if event.key in ['Y']:
rescale = False
if event.key in ['k','c','C','K', 'R']:
fig_clear = True
if flg==1: # Default is not to redraw
self.on_draw(rescale=rescale, fig_clear=fig_clear)
elif flg==2: # Layer (no clear)
self.on_draw(replot=False, rescale=rescale)
elif flg==3: # Layer (no clear)
self.on_draw(in_wrest=wrest, rescale=rescale)
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['y_minmax'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, replot=True, in_wrest=None, rescale=True, fig_clear=False):
""" Redraws the figure
"""
#
if replot is True:
if fig_clear:
self.fig.clf()
# Loop on windows
all_idx = self.llist['show_line']
nplt = self.sub_xy[0]*self.sub_xy[1]
if len(all_idx) <= nplt:
self.idx_line = 0
subp = np.arange(nplt) + 1
subp_idx = np.hstack(subp.reshape(self.sub_xy[0],self.sub_xy[1]).T)
#print('idx_l={:d}, nplt={:d}, lall={:d}'.format(self.idx_line,nplt,len(all_idx)))
for jj in range(min(nplt, len(all_idx))):
try:
idx = all_idx[jj+self.idx_line]
except IndexError:
continue # Likely too few lines
#print('jj={:d}, idx={:d}'.format(jj,idx))
# Grab line
wrest = self.llist[self.llist['List']].wrest[idx]
kwrest = wrest.value # For the Dict
# Single window?
if in_wrest is not None:
if np.abs(wrest-in_wrest) > (1e-3*u.AA):
continue
# Abs_Sys: Color the lines
if self.abs_sys is not None:
absline = self.grab_line(wrest)
# Generate plot
self.ax = self.fig.add_subplot(self.sub_xy[0],self.sub_xy[1], subp_idx[jj])
self.ax.clear()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Zero line
self.ax.plot( [0., 0.], [-1e9, 1e9], ':', color='gray')
# Velocity
wvobs = (1+self.z) * wrest
velo = (self.spec.wavelength/wvobs - 1.)*const.c.to('km/s')
# Plot
self.ax.plot(velo, self.spec.flux, 'k-',drawstyle='steps-mid')
# GID for referencing
self.ax.set_gid(wrest)
# Labels
#if jj >= (self.sub_xy[0]-1)*(self.sub_xy[1]):
if (((jj+1) % self.sub_xy[0]) == 0) or ((jj+1) == len(all_idx)):
self.ax.set_xlabel('Relative Velocity (km/s)')
else:
self.ax.get_xaxis().set_ticks([])
lbl = self.llist[self.llist['List']].name[idx]
# Kinematics
kinl = ''
if absline is not None:
if (absline.analy['flag_kin'] % 2) >= 1:
kinl = kinl + 'L'
if (absline.analy['flag_kin'] % 4) >= 2:
kinl = kinl + 'H'
self.ax.text(0.1, 0.05, lbl+kinl, color='blue', transform=self.ax.transAxes,
size='x-small', ha='left')
# Reset window limits
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.set_xlim(self.psdict['x_minmax'])
# Rescale?
if (rescale is True) & (self.norm is False):
gdp = np.where( (velo.value > self.psdict['x_minmax'][0]) &
(velo.value < self.psdict['x_minmax'][1]))[0]
if len(gdp) > 5:
per = xstats.basic.perc(self.spec.flux[gdp])
self.ax.set_ylim((0., 1.1*per[1]))
else:
self.ax.set_ylim(self.psdict['y_minmax'])
else:
self.ax.set_ylim(self.psdict['y_minmax'])
# Fonts
xputils.set_fontsize(self.ax,6.)
clr='black'
if absline is not None:
try:
vlim = absline.analy['vlim']
except KeyError:
pass
# Color coding
try: # .clm style
flag = absline.analy['FLAGS'][0]
except KeyError:
flag = None
else:
if flag <= 1: # Standard detection
clr = 'green'
elif flag in [2,3]:
clr = 'blue'
elif flag in [4,5]:
clr = 'purple'
# ABS ID
try: # NG?
flagA = absline.analy['do_analysis']
except KeyError:
flagA = None
else:
if (flagA>0) & (clr == 'black'):
clr = 'green'
try: # Limit?
flagL = absline.analy['flg_limit']
except KeyError:
flagL = None
else:
if flagL == 2:
clr = 'blue'
if flagL == 3:
clr = 'purple'
try: # Blends?
flagE = absline.analy['flg_eye']
except KeyError:
flagE = None
else:
if flagE == 1:
clr = 'orange'
if flagA == 0:
clr = 'red'
pix = np.where( (velo > vlim[0]) & (velo < vlim[1]))[0]
self.ax.plot(velo[pix], self.spec.flux[pix], '-',
drawstyle='steps-mid', color=clr)
# Draw
self.canvas.draw()
# ######################
class AODMWidget(QtGui.QWidget):
''' Widget for comparing tau_AODM profiles
19-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, parent=None, vmnx=[-300., 300.]*u.km/u.s,
norm=True, linelist=None):
'''
spec = Spectrum1D
'''
super(AODMWidget, self).__init__(parent)
# Initialize
self.spec = spec
self.norm = norm
self.z = z
self.vmnx = vmnx
self.wrest = wrest # Expecting (requires) units
self.lines = []
if linelist is None:
self.linelist = LineList('ISM')
for iwrest in self.wrest:
self.lines.append(AbsLine(iwrest,linelist=self.linelist))
self.psdict = {} # Dict for spectra plotting
self.psdict['x_minmax'] = self.vmnx.value # Too painful to use units here
self.psdict['y_minmax'] = [-0.1, 1.1]
self.psdict['nav'] = ltgu.navigate(0,0,init=True)
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Key stroke
def on_key(self,event):
# Init
rescale = True
flg = 0
## NAVIGATING
if event.key in self.psdict['nav']:
flg = ltgu.navigate(self.psdict,event)
if event.key in ['b','t','W','Z','Y','l','r']:
rescale = False
self.on_draw(rescale=rescale)
# Click of main mouse button
def on_click(self,event):
return # DO NOTHING FOR NOW
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['y_minmax'], ':', color='green')
self.on_draw()
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, rescale=True):
""" Redraws the figure
"""
#
self.ax = self.fig.add_subplot(1,1,1)
self.ax.clear()
ymx = 0.
for ii,iwrest in enumerate(self.wrest):
# Velocity
wvobs = (1+self.z) * iwrest
velo = (self.spec.wavelength/wvobs - 1.)*const.c.to('km/s')
gdp = np.where((velo.value > self.psdict['x_minmax'][0]) &
(velo.value < self.psdict['x_minmax'][1]))[0]
# Normalize?
if self.norm is False:
per = xstats.basic.perc(self.spec.flux[gdp])
fsplice = per[1] / self.spec.flux[gdp]
else:
fsplice = 1./ self.spec.flux[gdp]
# AODM
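            # Apparent optical depth method (cf. Savage & Sembach 1991):
            #   N_a(v) = tau_a(v) * m_e * c / (pi * e**2 * f * lambda)
            #         ~= 3.768e14 * ln(I_c/I) / (f * lambda[Ang])  [cm^-2 per (km/s)]
            # 10**14.5761 is that numerical constant, and fsplice above is an
            # estimate of I_c/I.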
cst = (10.**14.5761)/(self.lines[ii].data['f']*iwrest.value)
Naodm = np.log(fsplice)*cst
ymx = max(ymx,np.max(Naodm))
# Plot
line, = self.ax.plot(velo[gdp], Naodm, '-', drawstyle='steps-mid')
# Labels
lbl = '{:g}'.format(iwrest)
clr = plt.getp(line, 'color')
self.ax.text(0.1, 1.-(0.05+0.05*ii), lbl, color=clr,
transform=self.ax.transAxes, size='small', ha='left')
self.ax.set_xlabel('Relative Velocity (km/s)')
self.ax.set_ylabel('N(AODM)')
# Zero line
self.ax.plot( [0., 0.], [-1e29, 1e29], ':', color='gray')
# Reset window limits
self.ax.set_xlim(self.psdict['x_minmax'])
if rescale:
self.psdict['y_minmax'] = [0.05*ymx, ymx*1.1]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.set_ylim(self.psdict['y_minmax'])
# Draw
self.canvas.draw()
# ##################################
# GUI for simple text entering
class EnterTextGUI(QtGui.QDialog):
''' GUI to grab text from the user
    29-Jul-2014 by JXP
'''
def __init__(self, directions='Enter:', parent=None):
'''
        directions = str
          Message to display
'''
super(EnterTextGUI, self).__init__(parent)
# Initialize
self.text = ''
#age textbox
textW = QtGui.QWidget()
textlabel = QtGui.QLabel(directions)
self.textbox = QtGui.QLineEdit()
self.connect(self.textbox,QtCore.SIGNAL('editingFinished ()'),
self.set_text)
# self.ageerror = QtGui.QLabel('')
textvbox = QtGui.QVBoxLayout()
textvbox.addWidget(textlabel)
textvbox.addWidget(self.textbox)
textW.setLayout(textvbox)
# Buttons
donebtn = QtGui.QPushButton('Done', self)
donebtn.clicked.connect(self.touch_done)
donebtn.setAutoDefault(False)
# Main Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(textW)
vbox.addWidget(donebtn)
#vbox.addWidget(self.cntdwn)
self.setLayout(vbox)
def set_text(self):
self.text = str(self.textbox.text())
def touch_done(self):
self.done(0)
# ################
# TESTING
if __name__ == "__main__":
from xastropy import spec as xspec
if len(sys.argv) == 1: #
flg_tst = 0
#flg_tst += 2**0 # ExamineSpecWidget
#flg_tst += 2**1 # PlotLinesWidget
#flg_tst += 2**2 # SelectLineWidget
#flg_tst += 2**3 # AbsSysWidget
#flg_tst += 2**4 # VelPltWidget
#flg_tst += 2**5 # SelectedLinesWidget
#flg_tst += 2**6 # AODMWidget
flg_tst += 2**7 # Simple Text Widget
else:
flg_tst = int(sys.argv[1])
# ExamineSpec
if (flg_tst % 2) == 1:
app = QtGui.QApplication(sys.argv)
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
app.setApplicationName('XSpec')
main = ExamineSpecWidget(spec)
main.show()
sys.exit(app.exec_())
# PltLineWidget
if (flg_tst % 2**2) >= 2**1:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('PltLine')
main = PlotLinesWidget()
main.show()
sys.exit(app.exec_())
# SelectLineWidget
if (flg_tst % 2**3) >= 2**2:
orig = False
llist_cls = LineList('ISM')
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectLine')
main = SelectLineWidget(llist_cls._data)
main.show()
app.exec_()
print(main.line)
# Another test
quant = main.line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
print(wrest)
sys.exit()
# AbsSys Widget
if (flg_tst % 2**4) >= 2**3:
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2319-1040_z2.675_id.fits'
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AbsSys')
main = AbsSysWidget([abs_fil,abs_fil2])
main.show()
sys.exit(app.exec_())
# VelPlt Widget
if (flg_tst % 2**5) >= 2**4:
specf = 0
if specf == 0: # PH957 DLA
# Spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
# Abs_sys
abs_sys = xiaa.GenericAbsSystem()
ion_fil = '/Users/xavier/DLA/Abund/Tables/PH957.z2309.ion'
abs_sys.zabs = 2.309
abs_sys.read_ion_file(ion_fil)
elif specf == 1: # UM184 LLS
# Spectrum
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = lsi.readspec(spec_fil)
# Abs_sys
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/UM184_z2.930_id.fits'
abs_sys = xiaa.GenericAbsSystem()
abs_sys.parse_absid_file(abs_fil)
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('VelPlot')
main = VelPlotWidget(spec, abs_sys=abs_sys)
main.show()
sys.exit(app.exec_())
# SelectedLines Widget
if (flg_tst % 2**6) >= 2**5:
print('Test: SelectedLines Widget')
llist = ltgu.set_llist('ISM')
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectedLines')
main = SelectedLinesWidget(llist['ISM'])#._data)
main.show()
sys.exit(app.exec_())
# AODM Widget
if (flg_tst % 2**7) >= 2**6:
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = lsi.readspec(spec_fil)
z=2.96916
lines = np.array([1548.195, 1550.770]) * u.AA
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = AODMWidget(spec, z, lines)
main.show()
sys.exit(app.exec_())
# Simple text input widget
if (flg_tst % 2**8) >= 2**7:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('TEXT')
main = EnterTextGUI('Enter some text:')
main.exec_()
print('You entered: {:s}'.format(main.text))
sys.exit()
| bsd-3-clause |
juliusbierk/scikit-image | doc/examples/plot_peak_local_max.py | 14 | 1445 | """
====================
Finding local maxima
====================
The ``peak_local_max`` function returns the coordinates of local peaks (maxima)
in an image. A maximum filter is used for finding local maxima. This operation
dilates the original image and merges neighboring local maxima closer than the
size of the dilation. Locations where the original image is equal to the
dilated image are returned as local maxima.
"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
from skimage import data, img_as_float
im = img_as_float(data.coins())
# image_max is the dilation of im with a 20*20 structuring element
# It is used within peak_local_max function
image_max = ndi.maximum_filter(im, size=20, mode='constant')
# Comparison between image_max and im to find the coordinates of local maxima
coordinates = peak_local_max(im, min_distance=20)
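# peak_local_max returns (row, col) indices, which is why the plot below uses
# coordinates[:, 1] for x and coordinates[:, 0] for y.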
# display results
fig, ax = plt.subplots(1, 3, figsize=(8, 3))
ax1, ax2, ax3 = ax.ravel()
ax1.imshow(im, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('Original')
ax2.imshow(image_max, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('Maximum filter')
ax3.imshow(im, cmap=plt.cm.gray)
ax3.autoscale(False)
ax3.plot(coordinates[:, 1], coordinates[:, 0], 'r.')
ax3.axis('off')
ax3.set_title('Peak local max')
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| bsd-3-clause |
aisthesis/pynance | pynance/opt/spread/diag.py | 2 | 7150 | """
.. Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Options - diagonal spreads (:mod:`pynance.opt.spread.diag`)
=============================================================
.. currentmodule:: pynance.opt.spread.diag
"""
from __future__ import absolute_import
import numpy as np
import pandas as pd
from .._common import _getkeys
from .._common import _getprice
from .._common import _relevant_rows
from .. import _constants
class Diag(object):
"""
Wrapper class for :class:`pandas.DataFrame` for retrieving
metrics on horizontal (calendar) spreads
Objects of this class are not intended for direct instantiation
but are created as attributes of objects of type
:class:`~pynance.opt.spread.core.Spread`.
.. versionadded:: 0.3.0
Parameters
----------
df : :class:`pandas.DataFrame`
Options data.
Attributes
----------
data : :class:`pandas.DataFrame`
Methods
-------
.. automethod:: dblcal
.. automethod:: diagbtrfly
"""
def __init__(self, df):
self.data = df
def dblcal(self, lowstrike, highstrike, expiry1, expiry2):
"""
Metrics for evaluating a double calendar spread.
Parameters
------------
optdata : DataFrame
Data returned from :func:`~pynance.opt.retrieve.get`
opttype : str ('call' or 'put')
Type of option on which to collect data.
lowstrike : numeric
Lower strike price. To be used for put spread.
highstrike : numeric
Higher strike price. To be used for call spread.
expiry1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
expiry2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
"""
assert lowstrike < highstrike
assert pd.Timestamp(expiry1) < pd.Timestamp(expiry2)
_rows1 = {}
_rows2 = {}
_prices1 = {}
_prices2 = {}
_index = ['Near Call', 'Far Call', 'Call Ratio', 'Near Put', 'Far Put',
'Put Ratio', 'Near to Far Ratio', 'Debit', 'Underlying_Price', 'Quote_Time']
_metrics = pd.DataFrame(index=_index, columns=['Value'])
_errmsg = "No key for {} strike {} {}"
_opttype = 'call'
_rows1[_opttype] = _relevant_rows(self.data, (highstrike, expiry1, _opttype),
_errmsg.format(expiry1, highstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (highstrike, expiry2, _opttype),
_errmsg.format(expiry2, highstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Near Call', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Call', 'Value'] = _prices2[_opttype]
_metrics.loc['Call Ratio', 'Value'] = _prices1[_opttype] / _prices2[_opttype]
_metrics.loc['Underlying_Price', 'Value'], _metrics.loc['Quote_Time', 'Value'] =\
_getkeys(_rows1[_opttype], ['Underlying_Price', 'Quote_Time'])
_opttype = 'put'
_rows1[_opttype] = _relevant_rows(self.data, (lowstrike, expiry1, _opttype),
_errmsg.format(expiry1, lowstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (lowstrike, expiry2, _opttype),
_errmsg.format(expiry2, lowstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Near Put', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Put', 'Value'] = _prices2[_opttype]
_metrics.loc['Put Ratio', 'Value'] = _prices1[_opttype] / _prices2[_opttype]
_neartot = sum(_prices1.values())
_fartot = sum(_prices2.values())
_metrics.loc['Near to Far Ratio', 'Value'] = float(_neartot) / _fartot
_metrics.loc['Debit', 'Value'] = _fartot - _neartot
return _metrics
def diagbtrfly(self, lowstrike, midstrike, highstrike, expiry1, expiry2):
"""
Metrics for evaluating a diagonal butterfly spread.
Parameters
------------
opttype : str ('call' or 'put')
Type of option on which to collect data.
lowstrike : numeric
Lower strike price. To be used for far put.
midstrike : numeric
Middle strike price. To be used for near straddle.
Typically at the money.
highstrike : numeric
Higher strike price. To be used for far call.
expiry1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
expiry2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
"""
assert lowstrike < midstrike
assert midstrike < highstrike
assert pd.Timestamp(expiry1) < pd.Timestamp(expiry2)
_rows1 = {}
_rows2 = {}
_prices1 = {}
_prices2 = {}
_index = ['Straddle Call', 'Straddle Put', 'Straddle Total', 'Far Call', 'Far Put', 'Far Total',
'Straddle to Far Ratio', 'Credit', 'Underlying_Price', 'Quote_Time']
_metrics = pd.DataFrame(index=_index, columns=['Value'])
_errmsg = "No key for {} strike {} {}"
_opttype = 'call'
_rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
_errmsg.format(expiry1, midstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (highstrike, expiry2, _opttype),
_errmsg.format(expiry2, highstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Straddle Call', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Call', 'Value'] = _prices2[_opttype]
_metrics.loc['Underlying_Price', 'Value'], _metrics.loc['Quote_Time', 'Value'] =\
_getkeys(_rows1[_opttype], ['Underlying_Price', 'Quote_Time'])
_opttype = 'put'
_rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
_errmsg.format(expiry1, midstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (lowstrike, expiry2, _opttype),
_errmsg.format(expiry2, lowstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Straddle Put', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Put', 'Value'] = _prices2[_opttype]
_metrics.loc['Straddle Total', 'Value'] = _neartot = sum(_prices1.values())
_metrics.loc['Far Total', 'Value'] = _fartot = sum(_prices2.values())
_metrics.loc['Straddle to Far Ratio', 'Value'] = _neartot / _fartot
_metrics.loc['Credit', 'Value'] = _neartot - _fartot
return _metrics
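# --- Illustrative usage sketch (not part of the library code above) ---------
# A hedged example of driving the Diag wrapper; the strikes and expiration
# dates are placeholders, and `optdata` must be an options DataFrame in the
# layout expected by the metrics helpers above.
def _example_diag_spreads(optdata):
    diag = Diag(optdata)
    dblcal = diag.dblcal(lowstrike=95., highstrike=105.,
                         expiry1='2015-06-19', expiry2='2015-07-17')
    btrfly = diag.diagbtrfly(90., 100., 110., '2015-06-19', '2015-07-17')
    return dblcal, btrfly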
| mit |
abhishekkrthakur/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to each other. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
krafczyk/spack | var/spack/repos/builtin/packages/py-scikit-image/package.py | 5 | 2071 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyScikitImage(PythonPackage):
"""Image processing algorithms for SciPy, including IO, morphology,
filtering, warping, color manipulation, object detection, etc."""
homepage = "http://scikit-image.org/"
url = "https://pypi.io/packages/source/s/scikit-image/scikit-image-0.12.3.tar.gz"
version('0.12.3', '04ea833383e0b6ad5f65da21292c25e1')
extends('python', ignore=r'bin/.*\.py$')
depends_on('py-dask', type=('build', 'run'))
depends_on('pil', type=('build', 'run'))
depends_on('py-networkx', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
| lgpl-2.1 |
subodhchhabra/pandashells | pandashells/test/config_lib_tests.py | 2 | 2187 | #! /usr/bin/env python
import os
import json
from unittest import TestCase
from pandashells.lib import config_lib
class GlobalArgTests(TestCase):
def test_home_path_looks_right(self):
"""
        The path to the user's home directory looks right
"""
home = os.path.expanduser('~')
self.assertEqual(config_lib.HOME, home)
def test_default_opt_dict_exists(self):
"""
The dictionary of default options exists
"""
self.assertTrue(len(config_lib.DEFAULT_DICT) > 0)
class GetConfigTests(TestCase):
def setUp(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('cp {f} {f}_orig'.format(f=config_lib.CONFIG_FILE_NAME))
def tearDown(self):
if os.path.isfile(config_lib.CONFIG_FILE_NAME + '_orig'):
os.system('mv {f}_orig {f}'.format(f=config_lib.CONFIG_FILE_NAME))
else: # pragma: no cover
os.system('rm {f}'.format(f=config_lib.CONFIG_FILE_NAME))
def test_set_config_creates_file(self):
"""
set_config() function writes to file
"""
expected_dict = {'name': 'John'}
config_lib.set_config(expected_dict)
with open(config_lib.CONFIG_FILE_NAME) as jsonfile:
saved_dict = json.loads(jsonfile.read())
self.assertEqual(expected_dict, saved_dict)
def test_get_config_non_existent_file(self):
"""
get_config() creates config file when it doesn't exist
"""
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('rm {}'.format(config_lib.CONFIG_FILE_NAME))
config = config_lib.get_config()
self.assertEqual(config_lib.DEFAULT_DICT, config)
def test_get_config_existing_file(self):
"""
get_config() reads existing file
"""
if os.path.isfile(config_lib.CONFIG_FILE_NAME):
os.system('rm {}'.format(config_lib.CONFIG_FILE_NAME))
test_config = {'name': 'Bill'}
with open(config_lib.CONFIG_FILE_NAME, 'w') as f:
f.write(json.dumps(test_config))
config = config_lib.get_config()
self.assertEqual(test_config, config)
| bsd-2-clause |
huongttlan/statsmodels | statsmodels/iolib/summary.py | 22 | 33071 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
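# e.g. forg(3.14159) -> '    3.142' and forg(1234567.0) -> ' 1.23e+06';
# with prec=4 the field widens to 10 characters.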
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
-----------
yname : string
optional, Default is `Y`
xname : list of strings
            optional, Default is `X.#` for # in p, the number of regressors
    Confidence interval : (0,1) not implemented
    title : string
            optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
            Prints the summarized results
Option :
returns='text'
            Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
            Not implemented yet
Option :
returns='HTML'
            Not implemented yet
    Examples
    --------
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> ols_results = sm.OLS(data.endog, data.exog).fit()
    >>> print(ols_results.summary())
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                   'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
        'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] #check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
    # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
        title = results.model.__class__.__name__ + ' Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
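# A minimal sketch of calling summary_top (hypothetical helper; `results` is
# any fitted results instance whose attributes cover the requested items):
def _example_summary_top(results):
    gleft = [('Dep. Variable:', None), ('Model:', None), ('Date:', None)]
    gright = [('No. Observations:', None), ('Df Residuals:', None)]
    return summary_top(results, title='Example Results',
                       gleft=gleft, gright=gright)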
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
[forg(conf_int[i,0]) for i in exog_idx],
[forg(conf_int[i,1]) for i in exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
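# Hedged usage sketch (not part of the original module): `res` stands for any
# fitted statsmodels-style results instance exposing params, bse, tvalues,
# pvalues and conf_int, e.g. the OLS fit built in the __main__ block below.
#
#     coef_table = summary_params(res, alpha=0.05, use_t=True,
#                                 title='Coefficients')
#     print(coef_table)                      # SimpleTable renders as text
#     print(coef_table.as_latex_tabular())   # or LaTeX / as_html() output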
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    Returns
    -------
    params_frame : pandas.DataFrame
        the parameter estimates, standard errors, test statistics, p-values
        and confidence bounds as columns, indexed by the exogenous variable
        names
    '''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
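# Hedged usage sketch (not part of the original module): unlike summary_params
# above, this variant returns a pandas DataFrame, which is convenient for
# post-processing or exporting the coefficient table (file name illustrative).
#
#     frame = summary_params_frame(res, alpha=0.05, use_t=True)
#     frame.to_csv('params.csv')
#     print(frame.loc[:, ['coef', 'P>|t|']])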
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
    title : None or string
        optional title for the table
    Returns
    -------
    table : SimpleTable
        a single table with one stub row per equation, optionally interleaved
        with rows for the extra statistics (e.g. bse or tvalues)
    '''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
    alpha : float
        significance level for the confidence intervals, default 0.05
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
        If True (default), then sub-tables keep their headers. If False, then
        only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
        If True, then all headers are kept. If False, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
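# Hedged usage sketch (not part of the original module): table_extend is the
# helper summary_params_2dflat uses to merge its per-equation tables; the same
# call works on any list of SimpleTable instances. `res2d`, `ynames` and
# `xnames` are placeholders for a multi-equation (2d params) results instance
# and its endog/exog names.
#
#     tables, table_all = summary_params_2dflat(res2d, endog_names=ynames,
#                                               exog_names=xnames)
#     merged = table_extend(tables, keep_headers=False)
#     print(merged)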
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
        raise ValueError('available output formats are text, tables, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
        Contains the list of SimpleTable instances; horizontally concatenated
        tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
        etext : list of strings
            list of lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
        if self.extra_txt is not None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
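    # Hedged sketch (not executed) of how the classes above are typically
    # assembled from the `res` fit created here:
    #
    #     smry = Summary()
    #     smry.add_table_2cols(res, title='OLS Regression Results')
    #     smry.add_table_params(res, alpha=.05, use_t=True)
    #     print(smry.as_text())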
| bsd-3-clause |
LSSTC-DSFP/LSSTC-DSFP-Sessions | Session5/Day1/rhlUtils.py | 1 | 2227 | """Utilities used in Robert Lupton's Practical Sessions
These replace LSST stack code, although with different APIs
"""
import numpy as np
import matplotlib.pyplot as plt
class CCD:
"""Describe a CCD's properties"""
rawWidth = 552 # number of pixels digitised per parallel transfer
tau_s = 13.92e-6 # Time for one serial transfer (s)
tau_p = 222.72e-6 # Time for one parallel transfer (s)
def __init__(self):
pass
class BBox:
"""Simple Bounding Box class for DSFP"""
def __init__(self, bbox):
self.x0 = bbox[0][0]
self.y0 = bbox[0][1]
self.x1 = self.x0 + bbox[1][0]
self.y1 = self.y0 + bbox[1][1]
self.size = bbox[1]
class Image:
"""Simple Image class for DSFP"""
def __init__(self, fileName):
biasData = np.load(fileName)
self.image = biasData["image"]
self.amps = []
for bbox in biasData['bboxes']:
self.amps.append(BBox(bbox))
def getAmpImage(self, amp):
"""Return the image of a single amplifier, passed either then index or a BBox"""
try:
amp.x0
except AttributeError:
amp = self.amps[amp]
return self.image[amp.y0:amp.y1, amp.x0:amp.x1]
def imshow(image, nsigma=2, *args, **kwargs):
"""Like pyplot.imshow but with sane defaults for image display
If vmin or vmax is omitted, use a linear stretch from nsigma[0]..nsigma[1] (or +- nsigma if a scalar)
"""
if 'aspect' not in kwargs:
kwargs['aspect'] = 'equal'
if 'cmap' not in kwargs:
kwargs['cmap'] = 'gray'
if 'interpolation' not in kwargs:
kwargs['interpolation'] = 'none'
if 'origin' not in kwargs:
        kwargs['origin'] = 'lower'   # matplotlib accepts only 'upper' or 'lower'
    if not ('vmin' in kwargs and 'vmax' in kwargs):
q1, med, q3 = np.percentile(image, [25, 50, 75])
stdev = 0.741*(q3 - q1)
try:
vmin, vmax = nsigma
except TypeError:
vmin, vmax = -nsigma, nsigma
kwargs['vmin'] = med + vmin*stdev
kwargs['vmax'] = med + vmax*stdev
plt.imshow(image, *args, **kwargs)
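# Hedged usage sketch (not part of the original module): the file name and
# amplifier index are illustrative, and assume an .npz file holding the
# 'image' and 'bboxes' entries that Image above expects.
#
#     im = Image("bias.npz")
#     amp0 = im.getAmpImage(0)      # first amplifier, selected by index
#     imshow(amp0, nsigma=3)        # linear stretch of +/- 3 sigma about the median
#     plt.show()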
| mit |
AlexRobson/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 88 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    #set unconnected (zero) entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
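# Hedged illustration (not part of the test suite) of the utility under test,
# on a 3-node toy graph where node 0 reaches node 2 only through node 1:
#
#     toy = np.array([[0., 1., 0.],
#                     [1., 0., 2.],
#                     [0., 2., 0.]])
#     dist = graph_shortest_path(toy, directed=False, method='D')
#     # dist[0, 2] == 3.0  (path 0 -> 1 -> 2)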
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
fretsonfire/fof-python | setup.py | 1 | 4223 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import sys
sys.path.append("src")
from setuptools import setup
import SceneFactory, Version, glob, os  # sys is already imported above
import distutils.command.sdist
options = {
"py2exe": {
"dist_dir": "dist/win32",
"includes": SceneFactory.scenes,
"excludes": [
"OpenGL", # OpenGL must be excluded and handled manually due to a py2exe bug
"glew.gl.apple",
"glew.gl.ati",
"glew.gl.atix",
"glew.gl.hp",
"glew.gl.ibm",
"glew.gl.ingr",
"glew.gl.intel",
"glew.gl.ktx",
"glew.gl.mesa",
"glew.gl.oml",
"glew.gl.pgi",
"glew.gl.rend",
"glew.gl.s3",
"glew.gl.sgi",
"glew.gl.sgis",
"glew.gl.sgix",
"glew.gl.sun",
"glew.gl.sunx",
"glew.gl.threedfx",
"glew.gl.win",
"ode",
"_ssl",
"bz2",
"email",
"calendar",
"doctest",
"ftplib",
"getpass",
"gopherlib",
"macpath",
"macurl2path",
"GimpGradientFile",
"GimpPaletteFile",
"PaletteFile",
"macosx",
"matplotlib",
"Tkinter",
"curses",
],
"optimize": 2,
},
"py2app": {
'app': ["src/FretsOnFire.py"],
'argv_emulation': True,
'dist_dir': 'dist/mac',
'frameworks': '/opt/local/lib/libvorbisfile.dylib',
#'dylib_excludes': 'OpenGL,AGL',
'iconfile': 'data/icon_mac_composed.icns',
'includes': SceneFactory.scenes,
'excludes': [
]
}
}
# Reuse the manifest file from "python setup.py sdist"
try:
dataFiles = []
ignoreExts = [".po", ".py", ".pot"]
for line in open("MANIFEST").readlines():
fn = line.strip()
if any([fn.endswith(e) for e in ignoreExts]): continue
if fn in ["Makefile", "MANIFEST", "MANIFEST.in"]: continue
dataFiles.append((os.path.dirname(fn), [fn]))
except IOError:
print "Unable to open MANIFEST. Please run python setup.py sdist -o to generate it."
dataFiles = []
extraOpts = {}
if os.name == "nt":
import py2exe
setupRequires = ["py2exe"]
extraOpts["windows"] = [
{
"script": "src/FretsOnFire.py",
"icon_resources": [(1, "data/icon.ico")]
}
]
extraOpts["zipfile"] = "data/library.zip"
elif sys.platform == "darwin":
setupRequires = ["py2app"]
else:
setupRequires = []
setup(version = Version.version(),
name = "Frets on Fire",
url = "http://www.unrealvoodoo.org",
author = "Unreal Voodoo",
author_email = "[email protected]",
license = "GPLv2",
description = "Frets on Fire is a game of musical skill and fast fingers. The aim of the game is to play guitar with the keyboard as accurately as possible.",
data_files = dataFiles,
options = options,
setup_requires = setupRequires,
**extraOpts
)
| mit |
kseetharam/genPolaron | amp_reconstruct_gs_plot.py | 1 | 11570 | import numpy as np
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.animation import FuncAnimation
from matplotlib.animation import writers
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
from timeit import default_timer as timer
if __name__ == "__main__":
mpegWriter = writers['ffmpeg'](fps=1, bitrate=1800)
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (60, 60, 60)
(dx, dy, dz) = (0.25, 0.25, 0.25)
higherCutoff = False; cutoffRat = 1.5
betterResolution = False; resRat = 0.5
# (Lx, Ly, Lz) = (21, 21, 21)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
# higherCutoff = False; cutoffRat = 1.5
# betterResolution = False; resRat = 0.5
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
massRat = 10.0
IRrat = 1
print(NGridPoints_cart)
    NGridPoints_cart = 3.75e8  # override so the datapath below points at the stored dataset folder
# Toggle parameters
toggleDict = {'Dynamics': 'imaginary'}
# cmap = 'gist_heat'
cmap = 'inferno'
my_cmap = matplotlib.cm.get_cmap(cmap)
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs/idyn_twophonon/hostGasDensity'
datapath = datapath + '/massRatio={:.1f}'.format(massRat)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
cartdatapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/redyn_cart'.format(1.44e6, 1)
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
cartdatapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/imdyn_cart'.format(1.44e6, 1)
innerdatapath = innerdatapath + '_spherical'
# # Analysis of Total Dataset
interpdatapath = innerdatapath + '/interp'
aIBi = -10
# Pnorm_des = 1.9
# Pnorm_des = 1.4
# Pnorm_des = 1.2
Pnorm_des = 1.0
# Pnorm_des = 0.5
# Pnorm_des = 0.1
tind = -1
linDimList = [(2, 2), (2.05, 2.05), (10, 10)]
linDimMajor, linDimMinor = linDimList[1]
qds_orig = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
n0 = qds_orig.attrs['n0']; gBB = qds_orig.attrs['gBB']; mI = qds_orig.attrs['mI']; mB = qds_orig.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
mc = mI * nu
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
tscale = xi / nu
PVals = qds_orig['P'].values
Pnorm = PVals / mc
Pind = np.abs(Pnorm - Pnorm_des).argmin().astype(int)
P = PVals[Pind]
tVals = qds_orig['t'].values
t = tVals[tind]
print('P/mc: {:.2f}'.format(P / mc))
print(P)
print(massRat, aIBi)
print(t / tscale)
print(linDimMajor, linDimMinor)
# All Plotting:
# # ORIGINAL SPHERICAL DATA PLOTS
# Individual Phonon Momentum Distribution(Original Spherical data)
Bk_2D_orig = (qds_orig['Real_CSAmp'] + 1j * qds_orig['Imag_CSAmp']).sel(P=P).isel(t=tind).values
Nph_orig = qds_orig['Nph'].sel(P=P).isel(t=tind).values
PhDen_orig_Vals = ((1 / Nph_orig) * np.abs(Bk_2D_orig)**2).real.astype(float)
kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds_orig.coords['k'].values); kgrid.initArray_premade('th', qds_orig.coords['th'].values)
kVec = kgrid.getArray('k'); dk = kVec[1] - kVec[0]
thVec = kgrid.getArray('th'); dth = thVec[1] - thVec[0]
print(P, np.max(kVec))
kg, thg = np.meshgrid(kVec, thVec, indexing='ij')
kxg = kg * np.sin(thg)
kzg = kg * np.cos(thg)
wk_Vals = pfs.omegak(kVec, mB, n0, gBB)
bdiff = 100 * np.abs(wk_Vals - nu * kVec) / (nu * kVec)
kind = np.abs(bdiff - 1).argmin().astype(int)
klin = kVec[kind]
print(klin)
# interpmul = 2
# PhDen_orig_da = xr.DataArray(PhDen_orig_Vals, coords=[kVec, thVec], dims=['k', 'th'])
# PhDen_orig_smooth, kg_orig_smooth, thg_orig_smooth = pfc.xinterp2D(PhDen_orig_da, 'k', 'th', interpmul)
# dk_smooth = kg_orig_smooth[1, 0] - kg_orig_smooth[0, 0]
# dth_smooth = thg_orig_smooth[0, 1] - thg_orig_smooth[0, 0]
# kxg_smooth = kg_orig_smooth * np.sin(thg_orig_smooth)
# kzg_smooth = kg_orig_smooth * np.cos(thg_orig_smooth)
# PhDen_orig_sum = np.sum(PhDen_orig_Vals * kg**2 * np.sin(thg) * dk * dth * (2 * np.pi)**(-2))
# PhDen_smooth_sum = np.sum(PhDen_orig_smooth * kg_orig_smooth**2 * np.sin(thg_orig_smooth) * dk_smooth * dth_smooth * (2 * np.pi)**(-2))
# print(PhDen_orig_sum, PhDen_smooth_sum)
fig1, ax1 = plt.subplots()
vmax = np.max(PhDen_orig_Vals)
# vmax = 8414555 # P=2.4
# vmax = 2075494 # P=1.20
# vmax = 1055106 # P=0.38
quad1 = ax1.pcolormesh(kzg, kxg, PhDen_orig_Vals, norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
quad1m = ax1.pcolormesh(kzg, -1 * kxg, PhDen_orig_Vals, norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
ax1.set_xlim([-1 * linDimMajor, linDimMajor])
ax1.set_ylim([-1 * linDimMinor, linDimMinor])
# print(vmax)
ax1.set_xlabel('kz (Impurity Propagation Direction)')
ax1.set_ylabel('kx')
ax1.set_title('Individual Phonon Momentum Distribution (Sph Orig)', size='smaller')
fig1.colorbar(quad1, ax=ax1, extend='both')
# CARTESIAN INTERPOLATION PLOTS
interp_ds = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(P, aIBi, linDimMajor, linDimMinor))
# interp_ds = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_lDM_{:.2f}_lDm_{:.2f}_unique.nc'.format(P, aIBi, linDimMajor, linDimMinor)); print('unique')
kxL = interp_ds['kx'].values; dkxL = kxL[1] - kxL[0]
kyL = interp_ds['ky'].values; dkyL = kyL[1] - kyL[0]
kzL = interp_ds['kz'].values; dkzL = kzL[1] - kzL[0]
xL = interp_ds['x'].values
yL = interp_ds['y'].values
zL = interp_ds['z'].values
kxLg_xz_slice, kzLg_xz_slice = np.meshgrid(kxL, kzL, indexing='ij')
xLg_xz_slice, zLg_xz_slice = np.meshgrid(xL, zL, indexing='ij')
xLg_xy_slice, yLg_xy_slice = np.meshgrid(xL, yL, indexing='ij')
PhDenLg_xz_slice = interp_ds['PhDen_xz'].values
n0 = interp_ds.attrs['n0']
gBB = interp_ds.attrs['gBB']
mI = interp_ds.attrs['mI']
mB = interp_ds.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
mc = mI * nu
# Individual Phonon Momentum Distribution (Cart Interp)
fig2, ax2 = plt.subplots()
quad2 = ax2.pcolormesh(kzLg_xz_slice, kxLg_xz_slice, PhDenLg_xz_slice, norm=colors.LogNorm(vmin=1e-3, vmax=np.max(PhDen_orig_Vals)), cmap='inferno')
ax2.set_xlabel('kz (Impurity Propagation Direction)')
ax2.set_ylabel('kx')
ax2.set_title('Individual Phonon Momentum Distribution (Cart Interp)', size='smaller')
fig2.colorbar(quad2, ax=ax2, extend='both')
# Impurity Momentum Magnitude Distribution (Interp)
PI_mag = interp_ds['PI_mag'].values
nPI_mag = interp_ds['nPI_mag'].values
# mom_deltapeak = interp_ds.attrs['mom_deltapeak']
mom_deltapeak = interp_ds['mom_deltapeak'].values
print(mom_deltapeak)
fig5, ax5 = plt.subplots()
ax5.plot(mc * np.ones(PI_mag.size), np.linspace(0, 1, PI_mag.size), 'y--', label=r'$m_{I}c_{BEC}$')
curve = ax5.plot(PI_mag, nPI_mag, color='k', lw=3, label='')
# D = nPI_mag - np.max(nPI_mag) / 2
# indices = np.where(D > 0)[0]
# ind_s, ind_f = indices[0], indices[-1]
# FWHMcurve = ax5.plot(np.linspace(PI_mag[ind_s], PI_mag[ind_f], 100), nPI_mag[ind_s] * np.ones(100), 'b-', linewidth=3.0, label='Incoherent Part FWHM')
# FWHMmarkers = ax5.plot(np.linspace(PI_mag[ind_s], PI_mag[ind_f], 2), nPI_mag[ind_s] * np.ones(2), 'bD', mew=0.75, ms=7.5, label='')
Zline = ax5.plot(P * np.ones(PI_mag.size), np.linspace(0, mom_deltapeak, PI_mag.size), 'r-', linewidth=3.0, label='Delta Peak (Z-factor)')
Zmarker = ax5.plot(P, mom_deltapeak, 'rx', mew=0.75, ms=7.5, label='')
dPIm = PI_mag[1] - PI_mag[0]
nPIm_Tot = np.sum(nPI_mag * dPIm) + mom_deltapeak
norm_text = ax5.text(0.7, 0.65, r'$\int n_{|\vec{P_{I}}|} d|\vec{P_{I}}| = $' + '{:.2f}'.format(nPIm_Tot), transform=ax5.transAxes, color='k')
ax5.legend()
ax5.set_xlim([-0.01, np.max(PI_mag)])
ax5.set_ylim([0, 1.05])
ax5.set_title('Impurity Momentum Magnitude Distribution (Cart Interp) (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.2f})'.format(P / mc), size='smaller')
ax5.set_ylabel(r'$n_{|\vec{P_{I}}|}$')
ax5.set_xlabel(r'$|\vec{P_{I}}|$')
# BARE ATOM POSITION DISTRIBUTIONS
# Interpolate 2D slice of position distribution
na_xz_slice = interp_ds['na_xz_slice'].values
na_xy_slice = interp_ds['na_xy_slice'].values
fig4, ax4 = plt.subplots()
# quad4 = ax4.pcolormesh(zLg_xz_slice, xLg_xz_slice, na_xz_slice, norm=colors.LogNorm(vmin=np.abs(np.min(na_xz_slice)), vmax=np.max(na_xz_slice)), cmap='inferno')
quad4 = ax4.pcolormesh(zLg_xz_slice, xLg_xz_slice, na_xz_slice, norm=colors.LogNorm(vmin=1e-15, vmax=np.max(na_xz_slice)), cmap=cmap)
# poslinDim4 = 1300
# ax4.set_xlim([-1 * poslinDim4, poslinDim4])
# ax4.set_ylim([-1 * poslinDim4, poslinDim4])
ax4.set_xlabel('z (Impurity Propagation Direction)')
ax4.set_ylabel('x')
ax4.set_xlim([-200, 200])
ax4.set_ylim([-200, 200])
ax4.set_title('Host Gas Density (real space, lab frame)')
fig4.colorbar(quad4, ax=ax4, extend='both')
# Bare Atom Position Distribution (Interp)
fig6, ax6 = plt.subplots()
# quad6 = ax6.pcolormesh(yLg_xy_slice, xLg_xy_slice, na_xy_slice, norm=colors.LogNorm(vmin=np.abs(np.min(na_xy_slice)), vmax=np.max(na_xy_slice)), cmap='inferno')
quad6 = ax6.pcolormesh(yLg_xy_slice, xLg_xy_slice, na_xy_slice, norm=colors.LogNorm(vmin=1e-15, vmax=np.max(na_xy_slice)), cmap=cmap)
# poslinDim6 = 1300
# ax6.set_xlim([-1 * poslinDim6, poslinDim6])
# ax6.set_ylim([-1 * poslinDim6, poslinDim6])
ax6.set_xlabel('y')
ax6.set_ylabel('x')
ax6.set_title('Host Gas Density (real space, lab frame)')
fig6.colorbar(quad6, ax=ax6, extend='both')
# plt.show()
# # BARE ATOM POSITION ANIMATION
# vmin = 1e-10; vmax = 1e-1
# na_xz_array = np.empty(tVals.size, dtype=np.object)
# for tind, t in enumerate(tVals):
# interp_ds = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(P, aIBi, t, linDimMajor, linDimMinor))
# na_xz_array[tind] = interp_ds['na_xz_slice'].values
# fig_a1, ax_a1 = plt.subplots()
# quad_a1 = ax_a1.pcolormesh(zLg_xz_slice, xLg_xz_slice, na_xz_array[0][:-1, :-1], norm=colors.LogNorm(vmin=vmin, vmax=vmax), cmap=cmap)
# ax_a1.set_xlabel('z (Impurity Propagation Direction)')
# ax_a1.set_ylabel('x')
# ax_a1.set_title('Host Gas Density (real space, lab frame)')
# fig_a1.colorbar(quad_a1, ax=ax_a1, extend='both')
# def animate_Den(i):
# if i >= tVals.size:
# return
# quad_a1.set_array(na_xz_array[i][:-1, :-1].ravel())
# anim_Den = FuncAnimation(fig_a1, animate_Den, interval=1000, frames=range(tVals.size), repeat=True)
# anim_Den_filename = '/HostGasDensity_mRat_{:.1f}_Pnorm_{:.2f}_aIBi_{:.2f}.mp4'.format(massRat, P / mc, aIBi)
# anim_Den.save(animpath + anim_Den_filename, writer=mpegWriter)
plt.show()
| mit |
Titan-C/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str nor a
    # joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
    # Test that using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than max of 100 and 0.02 * n_samples),
    # the tree build should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
marionleborgne/nupic.research | projects/sensorimotor/experiments/capacity/data_utils.py | 22 | 6537 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities to process and visualize data from the sensorimotor experiment
"""
import csv
import glob
import os
import sys
import matplotlib.pyplot as plt
from pylab import rcParams
def combineCsvFiles(directoryPath, outputFileName):
"""
Combines all csv files in specified path.
All files are assumed to have a header row followed by data.
  The resulting file contains only one header row but all of the files' data
  combined.
  Caution: the csv files are iterated over in alphabetical order, so a file
  100.csv may be appended before a file 10.csv and mess up your data plotting.
"""
appendHeader = True
# Create csv output writer
os.chdir(directoryPath)
with open(outputFileName, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
# Iterate over csv files in directory
for csvFileName in glob.glob("*.csv"):
# Ignore and write over old version of the same file name
if csvFileName != outputFileName:
# Read each file writing the pertinent file lines to output
with open(csvFileName, "rU") as inputFile:
csvReader = csv.reader(inputFile)
line = next(csvReader)
if appendHeader:
csvWriter.writerow(line)
appendHeader = False
          # copy the single data row that each per-run csv is expected to hold
          line = next(csvReader)
          csvWriter.writerow(line)
def getChartData(path, xDataColumnIdx, yDataColumnIdxs, yStdDevIdxs):
"""
Gets chart-ready data from the specified csv file
"""
assert len(yDataColumnIdxs) == len(yStdDevIdxs)
with open(path, "rU") as inputFile:
csvReader = csv.reader(inputFile)
    # Get IV (x-axis) values
isHeader = True
xData = []
for row in csvReader:
if isHeader:
isHeader = False
else:
xData.append(float(row[xDataColumnIdx]))
    # Get DVs' (y-axis) values and their std devs
allYData = []
allYStdDevs = []
plotTitles = []
for i, yColIdx in enumerate(yDataColumnIdxs):
# Reset the file position to allow iterator reuse
inputFile.seek(0)
# build the y data and y std devs
yCol = []
yColStdDev = []
isHeader = True
stdDevIdx = yStdDevIdxs[i]
for row in csvReader:
if isHeader:
plotTitles.append(row[yColIdx])
else:
yCol.append(float(row[yColIdx]))
# Std Devs
if isHeader:
isHeader = False
elif stdDevIdx == -1:
yColStdDev.append(0)
else:
yColStdDev.append(float(row[stdDevIdx]))
allYData.append(yCol)
allYStdDevs.append(yColStdDev)
return xData, allYData, allYStdDevs, plotTitles
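# Hedged usage sketch (not part of the original module): the directory, file
# name and column indices are illustrative; see
# plotSensorimotorExperimentResults below for the column mapping actually used.
#
#     combineCsvFiles("results/capacity_run1", "combined.csv")
#     x, ys, stds, titles = getChartData("combined.csv", 0, [11, 9], [12, -1])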
def getErrorbarFigures(title, X, Ys, stdDevs, plotTitles, xAxisLabel,
yAxisLabels, gridFormat):
"""
  Plots the specified data, returning a figure of errorbar subplots (one per DV)
"""
rcParams['figure.figsize'] = 15, 15
fig = plt.figure()
fig.suptitle(title)
fig.subplots_adjust(left=None, right=None, bottom=None, top=None,
wspace=None, hspace=0.35)
plt.ion()
plt.show()
rcParams.update({'font.size': 12})
for i, y in enumerate(Ys):
ax = fig.add_subplot(gridFormat + 1 + i)
ax.set_title(plotTitles[i])
ax.set_xlabel(xAxisLabel)
ax.set_ylabel(yAxisLabels[i])
ax.axis([0, max(X) + 10, 0, 20])
ax.errorbar(X, y, stdDevs[i])
return fig
def getErrorbarFigure(title, x, y, stdDevs, xAxisLabel, yAxisLabel,
xRangeMax=None, yRangeMax=None):
fig = plt.figure()
fig.suptitle(title)
fig.subplots_adjust(left=None, right=None, bottom=None, top=None,
wspace=None, hspace=0.35)
plt.ion()
plt.show()
ax = fig.add_subplot(111)
ax.set_xlabel(xAxisLabel)
ax.set_ylabel(yAxisLabel)
if xRangeMax is None:
xRangeMax = max(x) + 10
if yRangeMax is None:
yRangeMax = max(y) + 10
ax.axis([0, xRangeMax, 0, yRangeMax])
ax.errorbar(x, y, stdDevs)
plt.draw()
return fig
def plotSensorimotorExperimentResults(filesDir, outputFileName):
"""
Plots the data produced by
sensorimotor/experiments/capacity/run.py
"""
print "Combining csv's in: {0}".format(filesDir)
print "Output file name: {0}\n".format(outputFileName)
combineCsvFiles(filesDir, outputFileName + ".csv")
# 0 when number of worlds is IV
# 1 when number of elements is IV
xColumnIdx = 0
xAxisLabel = "Worlds"
yAxisLabels = ["Cells", "Cells", "Cells", "Cells", "Cols", "Cols"]
# Following indices are columns in the excel file produced by
# sensorimotor/experiments/capacity/run.py and represent the following
# metrics:
# Mean & Max Stability, Mean & Max Distinctness, Mean & Max Bursting Cols
yColumnIdxs = [11, 9, 16, 14, 46, 44]
# The following are the column indices in the same xls file for the std
# deviations of the metrics specified by yColumnIdxs. A -1 means the script
# won't plot a std dev for the corresponding metric.
yStdDevIdxs = [12, -1, 17, -1, 47, -1]
iv, dvs, stdDevs, metricTitles = getChartData(outputFileName + ".csv",
xColumnIdx, yColumnIdxs,
yStdDevIdxs)
# 3x2 subplot grid
gridFormat = 320
getErrorbarFigures(filesDir, iv, dvs, stdDevs, metricTitles,
xAxisLabel, yAxisLabels, gridFormat)
plt.savefig(outputFileName, bbox_inches="tight")
plt.draw()
raw_input("Press enter...")
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: ./data_utils.py FILES_DIR COMBINED_FILE_NAME"
sys.exit()
plotSensorimotorExperimentResults(sys.argv[1], sys.argv[2])
| agpl-3.0 |
mlyundin/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
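# A minimal usage sketch of the loaders and generators re-exported above;
# illustrative only, kept private and unused. Loaders return Bunch objects
# with `data`/`target`, generators return (X, y) arrays.
def _example_datasets_usage():
    """Tiny sketch: load a bundled dataset and generate a synthetic one."""
    iris = load_iris()                  # bundled reference dataset
    X, y = iris.data, iris.target       # (150, 4) features, integer labels
    Xs, ys = make_classification(n_samples=20, n_features=5, random_state=0)
    return X.shape, y.shape, Xs.shape, ys.shape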
| bsd-3-clause |
ARudiuk/mne-python | mne/viz/montage.py | 11 | 1801 | """Functions to plot EEG sensor montages or digitizer montages
"""
import numpy as np
from .utils import plt_show
def plot_montage(montage, scale_factor=1.5, show_names=False, show=True):
"""Plot a montage
Parameters
----------
montage : instance of Montage
The montage to visualize.
scale_factor : float
Determines the size of the points. Defaults to 1.5.
show_names : bool
Whether to show the channel names. Defaults to False.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure object.
"""
from ..channels.montage import Montage, DigMontage
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if isinstance(montage, Montage):
pos = montage.pos
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
ch_names = montage.ch_names
for ch_name, x, y, z in zip(ch_names, pos[:, 0],
pos[:, 1], pos[:, 2]):
ax.text(x, y, z, ch_name)
elif isinstance(montage, DigMontage):
pos = np.vstack((montage.hsp, montage.elp))
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
if show_names:
if montage.point_names:
hpi_names = montage.point_names
for hpi_name, x, y, z in zip(hpi_names, montage.elp[:, 0],
montage.elp[:, 1],
montage.elp[:, 2]):
ax.text(x, y, z, hpi_name)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt_show(show)
return fig
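# Minimal usage sketch for plot_montage(); illustrative only. It assumes the
# Montage constructor of this MNE vintage accepts (pos, ch_names, kind,
# selection) -- the channel names and positions below are made up.
def _example_plot_montage(show=False):
    """Build a tiny synthetic Montage and hand it to plot_montage()."""
    from ..channels.montage import Montage
    rng = np.random.RandomState(0)
    pos = rng.randn(4, 3) * 0.1         # fake 3D sensor positions (meters)
    montage = Montage(pos=pos, ch_names=['Fz', 'Cz', 'Pz', 'Oz'],
                      kind='synthetic', selection=np.arange(4))
    return plot_montage(montage, show_names=True, show=show)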
| bsd-3-clause |
chatcannon/scipy | scipy/stats/tests/test_morestats.py | 17 | 50896 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
4.43,0.21,4.75,0.71,1.52,3.24,
0.93,0.42,4.97,9.53,4.55,0.47,6.66]
w,pw = stats.shapiro(x1)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
0.08,3.67,2.81,3.49]
w,pw = stats.shapiro(x2)
assert_almost_equal(w,0.9590270,6)
assert_almost_equal(pw,0.52460,3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
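# Usage sketch (illustrative, not exercised by the suite): shapiro() returns
# the W statistic and a p-value; a small p-value suggests departure from
# normality, which is what the published values above encode.
def _example_shapiro_usage():
    rng = np.random.RandomState(0)
    w_norm, p_norm = stats.shapiro(rng.normal(size=50))       # should not reject
    w_expo, p_expo = stats.shapiro(rng.exponential(size=50))  # usually rejects
    return (w_norm, p_norm), (w_expo, p_expo)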
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1,2,3,3,4]
y = [3,2,6,1,6,1,4,1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x,y)
assert_almost_equal(W,23.5,11)
assert_almost_equal(pval,0.13499256881897437,11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W,185.5,11)
assert_almost_equal(pval,0.18145819972867083,11)
def test_exact(self):
W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12])
assert_almost_equal(W,10.0,11)
assert_almost_equal(pval,0.533333333333333333,7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T,20.78587342806484,7)
assert_almost_equal(pval,0.0136358632781,7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
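# Usage sketch (illustrative, not exercised by the suite): levene() compares
# spread across samples; `center` picks the location estimate ('mean',
# 'median' or 'trimmed') and proportiontocut only matters for 'trimmed'.
def _example_levene_usage():
    rng = np.random.RandomState(0)
    a, b = rng.randn(30), 2.0 * rng.randn(30)   # b has twice the spread
    stat, pval = stats.levene(a, b, center='median')
    return stat, pval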
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100,250)
assert_almost_equal(pval,0.0018833009350757682,11)
pval = stats.binom_test(201,405)
assert_almost_equal(pval,0.92085205962670713,11)
pval = stats.binom_test([682,243],p=3.0/4)
assert_almost_equal(pval,0.38249155957481695,11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1,2,3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1,x1**2),
(3.2282229927203536, 0.072379187848207877), 11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478), 11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
#Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
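# Usage sketch (illustrative, not exercised by the suite): with fit=True,
# probplot() returns ((osm, osr), (slope, intercept, r)) -- theoretical
# quantiles, ordered data, and the least-squares line used as reference.
def _example_probplot_usage():
    rng = np.random.RandomState(0)
    x = rng.normal(size=50)
    (osm, osr), (slope, intercept, r) = stats.probplot(x, fit=True)
    return osm.shape, osr.shape, slope, intercept, r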
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1,2])
assert_raises(ValueError, stats.wilcoxon, [1,2], [1,2], "dummy")
class TestKstat(TestCase):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
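# Usage sketch (illustrative, not exercised by the suite): with lmbda=None,
# boxcox() both transforms the strictly positive data and returns the lambda
# maximizing the log-likelihood, which TestBoxcox exercises above.
def _example_boxcox_usage():
    rng = np.random.RandomState(0)
    x = rng.lognormal(size=100)          # positive, right-skewed data
    xt, maxlog = stats.boxcox(x)         # transformed data, fitted lambda
    return xt.shape, maxlog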
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355,5,2,359,10,350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20,21,22,18,19,20.5,19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355,5,2,359,10,350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
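# Usage sketch (illustrative, not exercised by the suite): the circular
# statistics need the data range -- here angles in degrees, hence high=360
# (low defaults to 0), matching the wrap-around data used above.
def _example_circular_stats():
    angles = [355, 5, 2, 359, 10, 350]   # cluster straddling 0/360 degrees
    return (stats.circmean(angles, high=360),
            stats.circvar(angles, high=360),
            stats.circstd(angles, high=360))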
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], ties="foo")
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected frequencies for the contingency table equal the observed table,
# so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
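# Usage sketch (illustrative, not exercised by the suite): median_test()
# returns the chi-squared statistic, its p-value, the grand median and the
# contingency table of counts above/below that median, as unpacked above.
def _example_median_test_usage():
    x = [1, 2, 3, 4, 5]
    y = [2, 4, 6, 8]
    stat, p, med, tbl = stats.median_test(x, y)
    return stat, p, med, tbl.shape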
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
QuLogic/burnman | examples/example_averaging.py | 1 | 6870 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
example_averaging
-----------------
This example shows the effect of different averaging schemes. Currently four
averaging schemes are available:
1. Voigt-Reuss-Hill
2. Voigt averaging
3. Reuss averaging
4. Hashin-Shtrikman averaging
See :cite:`Watt1976` Journal of Geophysics and Space Physics for explanations
of each averaging scheme.
*Specifically uses:*
* :class:`burnman.averaging_schemes.VoigtReussHill`
* :class:`burnman.averaging_schemes.Voigt`
* :class:`burnman.averaging_schemes.Reuss`
* :class:`burnman.averaging_schemes.HashinShtrikmanUpper`
* :class:`burnman.averaging_schemes.HashinShtrikmanLower`
*Demonstrates:*
* implemented averaging schemes
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
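# Illustrative helper (not used below): for volume fractions f_i and moduli
# M_i, the simplest bounds listed in the docstring reduce to one-liners; the
# Hashin-Shtrikman bounds are tighter but have no comparably short form.
def _voigt_reuss_hill_sketch(fractions, moduli):
    """Return the (Voigt, Reuss, Voigt-Reuss-Hill) averages of `moduli`."""
    voigt = sum(f * m for f, m in zip(fractions, moduli))        # arithmetic
    reuss = 1.0 / sum(f / m for f, m in zip(fractions, moduli))  # harmonic
    return voigt, reuss, 0.5 * (voigt + reuss)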
if __name__ == "__main__":
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
matas et al. 2007)
or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
amount_perovskite = 0.6
rock = burnman.Composite([amount_perovskite, 1.0-amount_perovskite],
[minerals.SLB_2011.mg_perovskite(),
minerals.SLB_2011.periclase()])
perovskitite = minerals.SLB_2011.mg_perovskite()
periclasite = minerals.SLB_2011.periclase()
#seismic model for comparison:
# pick from .prem() .slow() .fast() (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM()
#set how many depth slices the computations should be done on
number_of_points = 20
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700e3, 2800e3, number_of_points)
#alternatively, we could use the values where prem is defined:
#depths = seismic_model.internal_depth_list()
pressures, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
temperatures = burnman.geotherm.brown_shankland(pressures)
print "Calculations are done for:"
rock.debug_print()
#calculate the seismic velocities of the rock using a whole battery of averaging schemes:
# do the end members, here averaging scheme does not matter (though it defaults to Voigt-Reuss-Hill)
model_pv = burnman.Model(perovskitite, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
model_fp = burnman.Model(periclasite, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
#Voigt Reuss Hill / Voigt / Reuss averaging
model_vrh = burnman.Model(rock, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
model_v = burnman.Model(rock, pressures, temperatures, burnman.averaging_schemes.Voigt())
model_r = burnman.Model(rock, pressures, temperatures, burnman.averaging_schemes.Reuss())
#Upper/lower bound for Hashin-Shtrikman averaging
model_hsu = burnman.Model(rock, pressures, temperatures, burnman.averaging_schemes.HashinShtrikmanUpper())
model_hsl = burnman.Model(rock, pressures, temperatures, burnman.averaging_schemes.HashinShtrikmanLower())
# PLOTTING
# plot vs
fig=plt.figure()
plt.plot(pressures/1.e9,model_v.v_s()/1.e3,color='c',linestyle='-',marker='^', \
markersize=4,label='Voigt')
plt.plot(pressures/1.e9,model_r.v_s()/1.e3,color='k',linestyle='-',marker='v', \
markersize=4,label='Reuss')
plt.plot(pressures/1.e9,model_vrh.v_s()/1.e3,color='b',linestyle='-',marker='x', \
markersize=4,label='Voigt-Reuss-Hill')
plt.plot(pressures/1.e9,model_hsu.v_s()/1.e3,color='r',linestyle='-',marker='x', \
markersize=4,label='Hashin-Shtrikman')
plt.plot(pressures/1.e9,model_hsl.v_s()/1.e3,color='r',linestyle='-',marker='x', \
markersize=4)
plt.plot(pressures/1.e9,model_pv.v_s()/1.e3,color='y',linestyle='-',marker='x', \
markersize=4,label='Mg Perovskite')
plt.plot(pressures/1.e9,model_fp.v_s()/1.e3,color='g',linestyle='-',marker='x', \
markersize=4,label='Periclase')
plt.xlim(min(pressures)/1.e9,max(pressures)/1.e9)
plt.legend(loc='upper left',prop={'size':11},frameon=False)
plt.xlabel('pressure (GPa)')
plt.ylabel('Vs (km/s)')
vs_pv_norm=(model_pv.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_fp_norm=(model_fp.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_vrh_norm=(model_vrh.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_v_norm=(model_v.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_r_norm=(model_r.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_hsu_norm=(model_hsu.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
vs_hsl_norm=(model_hsl.v_s()-model_fp.v_s())/(model_pv.v_s()-model_fp.v_s())
ax=fig.add_axes([0.58, 0.18, 0.3, 0.3])
plt.plot(pressures/1.e9,vs_v_norm,color='c',linestyle='-',marker='^', \
markersize=4,label='Voigt')
plt.plot(pressures/1.e9,vs_r_norm,color='k',linestyle='-',marker='v', \
markersize=4,label='Reuss')
plt.plot(pressures/1.e9,vs_vrh_norm,color='b',linestyle='-',marker='x', \
markersize=4,label='Voigt-Reuss-Hill')
plt.plot(pressures/1.e9,vs_hsl_norm,color='r',linestyle='-',marker='x', \
markersize=4,label='Hashin-Shtrikman')
plt.plot(pressures/1.e9,vs_hsu_norm,color='r',linestyle='-',marker='x', \
markersize=4)
plt.plot(pressures/1.e9,vs_pv_norm,color='y',linestyle='-',marker='x', \
markersize=4,label='Mg Perovskite')
plt.plot(pressures/1.e9,vs_fp_norm,color='g',linestyle='-',marker='x', \
markersize=4,label='Periclase')
ax.tick_params(labelsize=10)
plt.title("normalized by mixture endmembers",fontsize=10)
plt.xlim(min(pressures)/1.e9,max(pressures)/1.e9)
plt.ylim(-0.005,1.005)
plt.xlabel('pressure (GPa)',fontsize=10)
plt.ylabel('normalized Vs',fontsize=10)
#plt.legend(loc='lower right')
plt.savefig("output_figures/example_averaging_normalized.png")
plt.show()
| gpl-2.0 |
dvklopfenstein/biocode | src/bin/colorset_hexvals.py | 1 | 2203 | #!/usr/bin/env python
"""Example using MplColorHelper."""
# Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved.
__author__ = 'DV Klopfenstein'
__copyright__ = "Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved."
__license__ = "GPL"
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pydvkbiology.matplotlib.ColorObj import MplColorHelper
import numpy as np
#def run(palette='Set1', num_vals=10):
def run(palette='hsv', num_vals=12):
"""Creating hexvalues for discrete colors using a colormap."""
fout_png = 'colors_{P}_{N}.png'.format(P=palette, N=num_vals)
_, axis = plt.subplots(1, 1, figsize=(6, 6))
colobj = MplColorHelper(palette, 0, num_vals-1)
colors = [colobj.get_hexstr(yval) for yval in range(num_vals)]
plt_color_text(colors)
for idx, color in enumerate(reversed(colors)):
print('{N:2} {COLOR}'.format(N=idx, COLOR=color))
axis.set_title('{N} Discrete Colors from {MAP}'.format(N=num_vals, MAP=palette))
plt.show()
plt.savefig(fout_png)
print(' WROTE: {PNG}'.format(PNG=fout_png))
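# Illustrative helper (not called by the script): MplColorHelper maps an
# integer in [start, stop] onto the chosen colormap; 'viridis' here is just
# an assumed example palette, and get_hexstr returns '#rrggbb' as in run().
def _example_three_hex_colors():
    helper = MplColorHelper('viridis', 0, 2)
    return [helper.get_hexstr(i) for i in range(3)]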
def plt_color_text(colors):
"""Plot a block of color and label it with text"""
num_vals = len(colors)
xvals = [10]*num_vals
yvals = range(num_vals)
for idx, (xval, yval, color) in enumerate(zip(xvals, yvals, colors)):
plt.scatter(xval, yval, s=1000, marker='s', color=color)
plt.text(xval+.004, yval, color, fontsize=20, va='center')
def cli():
"""Command-line interface for creating hexvalues for discrete colors from a colormap."""
palette = "Set1"
num_vals = 10
for arg in sys.argv[1:]:
if arg.isdigit():
num_vals = int(arg)
else:
palette = arg
run(palette, num_vals)
def main():
"""For testing specific color combinations"""
fout_png = 'color0.png'
_, axis = plt.subplots(1, 1, figsize=(6, 6))
colors = [
'#0032ff',
'#00ebff',
'#fdfe02',
'#ff0000',
'#8500ff',
]
plt_color_text(colors)
plt.savefig(fout_png)
if __name__ == '__main__':
cli()
#main()
# Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved.
| mit |
shaypal5/cachier | tests/test_pickle_core.py | 1 | 11700 | """Test for the Cachier python package."""
# This file is part of Cachier.
# https://github.com/shaypal5/cachier
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <[email protected]>
# from os.path import (
# realpath,
# dirname
# )
import os
from time import (
time,
sleep
)
from datetime import timedelta
from random import random
import threading
try:
import queue
except ImportError: # python 2
import Queue as queue
import hashlib
import pandas as pd
from cachier import cachier
from cachier.pickle_core import DEF_CACHIER_DIR
# Pickle core tests
@cachier(next_time=False)
def _takes_5_seconds(arg_1, arg_2):
"""Some function."""
sleep(5)
return 'arg_1:{}, arg_2:{}'.format(arg_1, arg_2)
def test_pickle_core():
"""Basic Pickle core functionality."""
_takes_5_seconds.clear_cache()
_takes_5_seconds('a', 'b')
start = time()
_takes_5_seconds('a', 'b', verbose_cache=True)
end = time()
assert end - start < 1
_takes_5_seconds.clear_cache()
SECONDS_IN_DELTA = 3
DELTA = timedelta(seconds=SECONDS_IN_DELTA)
@cachier(stale_after=DELTA, next_time=False)
def _stale_after_seconds(arg_1, arg_2):
"""Some function."""
return random()
def test_stale_after():
"""Testing the stale_after functionality."""
_stale_after_seconds.clear_cache()
val1 = _stale_after_seconds(1, 2)
val2 = _stale_after_seconds(1, 2)
val3 = _stale_after_seconds(1, 3)
assert val1 == val2
assert val1 != val3
sleep(3)
val4 = _stale_after_seconds(1, 2)
assert val4 != val1
_stale_after_seconds.clear_cache()
@cachier(stale_after=DELTA, next_time=True)
def _stale_after_next_time(arg_1, arg_2):
"""Some function."""
return random()
def test_stale_after_next_time():
"""Testing the stale_after with next_time functionality."""
_stale_after_next_time.clear_cache()
val1 = _stale_after_next_time(1, 2)
val2 = _stale_after_next_time(1, 2)
val3 = _stale_after_next_time(1, 3)
assert val1 == val2
assert val1 != val3
sleep(SECONDS_IN_DELTA + 1)
val4 = _stale_after_next_time(1, 2)
assert val4 == val1
sleep(0.5)
val5 = _stale_after_next_time(1, 2)
assert val5 != val1
_stale_after_next_time.clear_cache()
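# Illustrative sketch (not exercised by the suite) of the pattern the tests
# above rely on: decorate a function with @cachier(); stale_after controls
# when a cached value is considered too old and gets recomputed.
@cachier(stale_after=timedelta(minutes=5))
def _example_cached_lookup(key):
    """Pretend-expensive lookup that cachier memoizes on disk."""
    sleep(0.1)
    return hashlib.sha256(str(key).encode('utf-8')).hexdigest()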
@cachier()
def _random_num():
return random()
@cachier()
def _random_num_with_arg(a):
# print(a)
return random()
def test_overwrite_cache():
"""Tests that the overwrite feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(overwrite_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 == int3
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', overwrite_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 == int3
_random_num_with_arg.clear_cache()
def test_ignore_cache():
"""Tests that the ignore_cache feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(ignore_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 != int3
assert int4 == int1
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', ignore_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 != int3
assert int4 == int1
_random_num_with_arg.clear_cache()
@cachier()
def _takes_time(arg_1, arg_2):
"""Some function."""
sleep(2) # this has to be enough time for check_calculation to run twice
return random() + arg_1 + arg_2
def _calls_takes_time(res_queue):
res = _takes_time(0.13, 0.02)
res_queue.put(res)
def test_pickle_being_calculated():
"""Testing pickle core handling of being calculated scenarios."""
_takes_time.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier(stale_after=timedelta(seconds=1), next_time=True)
def _being_calc_next_time(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
def _calls_being_calc_next_time(res_queue):
res = _being_calc_next_time(0.13, 0.02)
res_queue.put(res)
def test_being_calc_next_time():
"""Testing pickle core handling of being calculated scenarios."""
_takes_time.clear_cache()
_being_calc_next_time(0.13, 0.02)
sleep(1.1)
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier()
def _bad_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
# _BAD_CACHE_FNAME = '.__main__._bad_cache'
_BAD_CACHE_FNAME = '.tests.test_pickle_core._bad_cache'
EXPANDED_CACHIER_DIR = os.path.expanduser(DEF_CACHIER_DIR)
_BAD_CACHE_FPATH = os.path.join(EXPANDED_CACHIER_DIR, _BAD_CACHE_FNAME)
def _calls_bad_cache(res_queue, trash_cache):
try:
res = _bad_cache(0.13, 0.02)
if trash_cache:
with open(_BAD_CACHE_FPATH, 'w') as cache_file:
cache_file.seek(0)
cache_file.truncate()
res_queue.put(res)
except Exception as exc: # skipcq: PYL-W0703
res_queue.put(exc)
def _helper_bad_cache_file(sleeptime):
"""Test pickle core handling of bad cache files."""
_bad_cache.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_bad_cache,
kwargs={'res_queue': res_queue, 'trash_cache': True})
thread2 = threading.Thread(
target=_calls_bad_cache,
kwargs={'res_queue': res_queue, 'trash_cache': False})
thread1.start()
sleep(sleeptime)
thread2.start()
thread1.join()
thread2.join()
if not res_queue.qsize() == 2:
return False
res1 = res_queue.get()
if not isinstance(res1, float):
return False
res2 = res_queue.get()
    if not ((res2 is None) or isinstance(res2, KeyError)):
return False
return True
# we want this to succeed at least once
def test_bad_cache_file():
"""Test pickle core handling of bad cache files."""
sleeptimes = [0.5, 0.1, 0.2, 0.3, 0.8, 1, 2]
sleeptimes = sleeptimes + sleeptimes
for sleeptime in sleeptimes:
if _helper_bad_cache_file(sleeptime):
return
assert False
@cachier()
def _delete_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
# _DEL_CACHE_FNAME = '.__main__._delete_cache'
_DEL_CACHE_FNAME = '.tests.test_pickle_core._delete_cache'
_DEL_CACHE_FPATH = os.path.join(EXPANDED_CACHIER_DIR, _DEL_CACHE_FNAME)
def _calls_delete_cache(res_queue, del_cache):
try:
# print('in')
res = _delete_cache(0.13, 0.02)
# print('out with {}'.format(res))
if del_cache:
# print('deleteing!')
os.remove(_DEL_CACHE_FPATH)
# print(os.path.isfile(_DEL_CACHE_FPATH))
res_queue.put(res)
except Exception as exc: # skipcq: PYL-W0703
# print('found')
res_queue.put(exc)
def _helper_delete_cache_file(sleeptime):
"""Test pickle core handling of missing cache files."""
_delete_cache.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_delete_cache,
kwargs={'res_queue': res_queue, 'del_cache': True})
thread2 = threading.Thread(
target=_calls_delete_cache,
kwargs={'res_queue': res_queue, 'del_cache': False})
thread1.start()
sleep(sleeptime)
thread2.start()
thread1.join()
thread2.join()
if not res_queue.qsize() == 2:
return False
res1 = res_queue.get()
# print(res1)
if not isinstance(res1, float):
return False
res2 = res_queue.get()
if not ((isinstance(res2, KeyError)) or ((res2 is None))):
return False
return True
# print(res2)
# print(type(res2))
def test_delete_cache_file():
"""Test pickle core handling of missing cache files."""
sleeptimes = [0.5, 0.3, 0.1, 0.2, 0.8, 1, 2]
sleeptimes = sleeptimes + sleeptimes
for sleeptime in sleeptimes:
if _helper_delete_cache_file(sleeptime):
return
assert False
def test_clear_being_calculated():
"""Test pickle core clear `being calculated` functionality."""
_takes_time.clear_being_calculated()
@cachier(stale_after=timedelta(seconds=1), next_time=True)
def _error_throwing_func(arg1):
if not hasattr(_error_throwing_func, 'count'):
_error_throwing_func.count = 0
_error_throwing_func.count += 1
if _error_throwing_func.count > 1:
raise ValueError("Tiny Rick!")
return 7
def test_error_throwing_func():
# with
res1 = _error_throwing_func(4)
sleep(1.5)
res2 = _error_throwing_func(4)
assert res1 == res2
# test custom cache dir for pickle core
CUSTOM_DIR = '~/.exparrot'
EXPANDED_CUSTOM_DIR = os.path.expanduser(CUSTOM_DIR)
@cachier(next_time=False, cache_dir=CUSTOM_DIR)
def _takes_5_seconds_custom_dir(arg_1, arg_2):
"""Some function."""
sleep(5)
return 'arg_1:{}, arg_2:{}'.format(arg_1, arg_2)
def test_pickle_core_custom_cache_dir():
"""Basic Pickle core functionality."""
_takes_5_seconds_custom_dir.clear_cache()
_takes_5_seconds_custom_dir('a', 'b')
start = time()
_takes_5_seconds_custom_dir('a', 'b', verbose_cache=True)
end = time()
assert end - start < 1
_takes_5_seconds_custom_dir.clear_cache()
assert _takes_5_seconds_custom_dir.cache_dpath() == EXPANDED_CUSTOM_DIR
def test_callable_hash_param():
def _hash_params(args, kwargs):
def _hash(obj):
if isinstance(obj, pd.core.frame.DataFrame):
return hashlib.sha256(pd.util.hash_pandas_object(obj).values.tobytes()).hexdigest()
return obj
k_args = tuple(map(_hash, args))
k_kwargs = tuple(sorted({k: _hash(v) for k, v in kwargs.items()}.items()))
return k_args + k_kwargs
@cachier(hash_params=_hash_params)
def _params_with_dataframe(*args, **kwargs):
"""Some function."""
return random()
_params_with_dataframe.clear_cache()
df_a = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
df_b = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
value_a = _params_with_dataframe(df_a, 1)
value_b = _params_with_dataframe(df_b, 1)
assert value_a == value_b # same content --> same key
value_a = _params_with_dataframe(1, df=df_a)
value_b = _params_with_dataframe(1, df=df_b)
assert value_a == value_b # same content --> same key
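# A minimal sketch of the same idea for NumPy arrays. This test is an
# illustration only and is not part of the original suite: the names
# _hash_array_params and _params_with_array are assumptions, but the
# cachier hash_params hook is used exactly as in the DataFrame test above.
def test_callable_hash_param_ndarray():
    import hashlib
    import numpy as np
    def _hash_array_params(args, kwargs):
        def _hash(obj):
            if isinstance(obj, np.ndarray):
                return hashlib.sha256(obj.tobytes()).hexdigest()
            return obj
        k_args = tuple(map(_hash, args))
        k_kwargs = tuple(sorted({k: _hash(v) for k, v in kwargs.items()}.items()))
        return k_args + k_kwargs
    @cachier(hash_params=_hash_array_params)
    def _params_with_array(*args, **kwargs):
        """Some function."""
        return random()
    _params_with_array.clear_cache()
    arr_a = np.array([0, 2, 3])
    arr_b = np.array([0, 2, 3])
    value_a = _params_with_array(arr_a, 1)
    value_b = _params_with_array(arr_b, 1)
    assert value_a == value_b  # same content --> same key
    _params_with_array.clear_cache()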
| mit |
rubikloud/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 10 | 3753 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
phdowling/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
flemic/ETD | etd_core/enriching_functionality.py | 1 | 8556 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""enriching_functionality.py: Generation of virtual training fingerprints based on propagation modeling."""
__author__ = "Filip Lemic"
__copyright__ = "Copyright 2015, Telecommunication Networks Group (TKN), TU Berlin"
__version__ = "1.0.0"
__maintainer__ = "Filip Lemic"
__email__ = "[email protected]"
__status__ = "Development"
import sys
import urllib2
import json
import raw_data_pb2
from scipy.spatial import Voronoi, voronoi_plot_2d
import scipy
import math
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# The URL where server listens
apiURL = 'http://localhost:5000/'
def virtual_point_user(coordinates):
pass
def virtual_point_modified_voronoi(coordinates):
"""Define locations of virtual training points based on modified Voronoi diagrams"""
vor = Voronoi(coordinates)
vertices = vor.vertices
vertices = vertices.tolist()
# Remove vertices that are outside the area where original training points are distributed
limit = min_max_x_y(coordinates)
excluded_points_ids = []
for i in range(0,len(vertices)):
if vertices[i][0] < limit[0] or vertices[i][0] > limit[1] or vertices[i][1] < limit[2] or vertices[i][1] > limit[3]:
excluded_points_ids.append(i)
for offset, index in enumerate(excluded_points_ids):
index -= offset
del vertices[index]
# Merge vertices that are close to one another
dist = knn_avg_dist(coordinates)
sum_close_vertices = []
for i in range(0,len(vertices)):
close_vertices = []
close_vertices.append(i)
for j in range(0,len(vertices)):
if i != j and math.sqrt(pow(vertices[i][0]-vertices[j][0],2) + pow(vertices[i][1]-vertices[j][1],2)) < dist/2:
close_vertices.append(j)
sum_close_vertices.append(close_vertices)
for i in range(0,len(sum_close_vertices)):
sum_close_vertices[i].sort()
cleanlist = []
[cleanlist.append(x) for x in sum_close_vertices if x not in cleanlist]
to_delete = []
to_add = []
for i in cleanlist:
if len(i) > 1:
x = 0.0
y = 0.0
num = 0
for j in i:
num = num + 1
x = x + vertices[j][0]
y = y + vertices[j][1]
to_delete.append(j)
to_add.append((x/num,y/num))
to_delete_clean = []
[to_delete_clean.append(x) for x in to_delete if x not in to_delete_clean]
for offset, index in enumerate(sorted(to_delete_clean)):
index -= offset
del vertices[index]
return vertices + to_add
def knn_avg_dist(coordinates):
"""Calculates for points in rows of X, the average distance of each, to their k-nearest neighbors"""
kdt = scipy.spatial.cKDTree(coordinates)
k = 4 # number of nearest neighbors
dists, neighs = kdt.query(coordinates, k+1)
avg_dists = np.mean(dists[:, 1:], axis=1)
return min(avg_dists)
def min_max_x_y(coordinates):
"""Returns min and max X and Y coordinates of the original set of points"""
min_x = coordinates[0][0]
max_x = coordinates[0][0]
min_y = coordinates[0][1]
max_y = coordinates[0][1]
for i in coordinates:
if i[0] < min_x:
min_x = i[0]
if i[0] > max_x:
max_x = i[0]
if i[1] < min_y:
min_y = i[1]
if i[1] > max_y:
max_y = i[1]
return (min_x,max_x,min_y,max_y)
def generate_virtual_fingerprints_idwi(coordinates,rssis,points,transmitters):
"""Propagation modeling based on Inverse Distance Weighted Interpolation"""
x = []
y = []
xi = []
yi = []
for i in coordinates:
x.append(i[0])
y.append(i[1])
for i in points:
xi.append(i[0])
yi.append(i[1])
dist = distance_matrix(x, y, xi, yi)
# In IDW, weights are 1 / distance
weights = 1.0 / dist
# Make weights sum to one
weights /= weights.sum(axis=0)
max_elements = 0
for point in rssis:
for transmitter in point.keys():
if len(point[transmitter]) > max_elements:
max_elements = len(point[transmitter])
virtual_fingerprints = {}
# For each BSSID
for transmitter in transmitters:
# For each meas number
for i in range(0,max_elements):
z = []
# For each point
for point in rssis:
try:
z.append(point[transmitter][i])
except:
z.append(-90)
# Multiply the weights for each interpolated point by all observed Z-values
zi = np.dot(weights.T, z) # Interpolated values for one measurement, one transmitter and all virtual points
iterate = -1
for i in zi:
iterate += 1
try:
virtual_fingerprints[iterate][transmitter].append(i)
except:
try:
virtual_fingerprints[iterate][transmitter] = []
virtual_fingerprints[iterate][transmitter].append(i)
except:
virtual_fingerprints[iterate] = {}
virtual_fingerprints[iterate][transmitter] = []
virtual_fingerprints[iterate][transmitter].append(i)
return virtual_fingerprints
def generate_virtual_fingerprints_multiwall(points, transmitters):
"""Propagation modeling based on Multiwall model"""
img, img_size, ap_locations, parameters, size = load_multiwall_parameters()
virtual_fingerprints = {}
for point in range(0,len(points)):
virtual_fingerprints[point] = {}
for transmitter in transmitters:
value = model_multiwall()
try:
virtual_fingerprints[point][transmitter].append(value)
except:
virtual_fingerprints[point][transmitter] = []
virtual_fingerprints[point][transmitter].append(value)
return virtual_fingerprints
def model_multiwall():
pass
def load_multiwall_parameters():
""""Loading data required for the Multiwall model"""
im = Image.open("multiwall_model/twist_2nd_floor.jpg")
img_size = im.size
img = im.load()
ap_locations_file = open('multiwall_model/AP_locations.txt', 'r')
ap_locations = {}
flag = 0
for line in ap_locations_file:
if flag == 0:
flag = 1
else:
line_clean = [x.strip() for x in line.split(',')]
ap_locations[line_clean[0]] = {}
ap_locations[line_clean[0]]['coordinate_x'] = float(line_clean[1])
ap_locations[line_clean[0]]['coordinate_y'] = float(line_clean[2])
ap_locations[line_clean[0]]['tx_power'] = float(line_clean[3])
parameters_file = open('multiwall_model/multiwall_parameters.txt', 'r')
parameters = {}
flag = 0
for line in parameters_file:
if flag == 0:
flag = 1
else:
line_clean = [x.strip() for x in line.split(',')]
parameters['gamma'] = float(line_clean[0])
parameters['lc'] = float(line_clean[1])
parameters['wall'] = float(line_clean[2])
size_file = open('multiwall_model/floor_size.txt', 'r')
size = {}
flag = 0
for line in size_file:
if flag == 0:
flag = 1
else:
line_clean = [x.strip() for x in line.split(',')]
size['coordinate_x'] = float(line_clean[0])
size['coordinate_y'] = float(line_clean[1])
return img, img_size, ap_locations, parameters, size
def distance_matrix(x0, y0, x1, y1):
"""Provides a distance matrix between all locations"""
obs = np.vstack((x0, y0)).T
interp = np.vstack((x1, y1)).T
d0 = np.subtract.outer(obs[:,0], interp[:,0])
d1 = np.subtract.outer(obs[:,1], interp[:,1])
return np.hypot(d0, d1)
# Enabling DELETE, PUT, etc.
class RequestWithMethod(urllib2.Request):
"""Workaround for using DELETE with urllib2"""
def __init__(self, url, method, data=None, headers={}, origin_req_host=None, unverifiable=False):
self._method = method
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
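# A small self-contained demo of the enrichment steps above (illustrative
# only; the 3x3 grid of coordinates is synthetic and not part of the original
# pipeline). It derives virtual training points from the Voronoi vertices of
# the original points and builds the corresponding IDW weight matrix.
if __name__ == '__main__':
    demo_coords = [[float(i), float(j)] for i in range(3) for j in range(3)]
    demo_virtual = virtual_point_modified_voronoi(demo_coords)
    print "[DEMO] Original points: ", len(demo_coords)
    print "[DEMO] Virtual points: ", len(demo_virtual)
    if demo_virtual:
        x0 = [p[0] for p in demo_coords]
        y0 = [p[1] for p in demo_coords]
        x1 = [p[0] for p in demo_virtual]
        y1 = [p[1] for p in demo_virtual]
        demo_weights = 1.0 / distance_matrix(x0, y0, x1, y1)
        demo_weights /= demo_weights.sum(axis=0)
        print "[DEMO] IDW weight matrix shape: ", demo_weights.shape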
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/mpl_toolkits/axisartist/axislines.py | 10 | 26147 | """
Axislines includes a modified implementation of the Axes class. The
biggest difference is that the artists responsible for drawing the axis line,
ticks, ticklabels and axis labels are separated out from mpl's Axis
class, which is much more than a collection of artists in the original
mpl. Originally, this change was motivated to support a curvilinear
grid. Here are a few reasons why I came up with a new axes class:
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* Angled ticks.
In the new axes class, xaxis and yaxis are set to not visible by
default, and a new set of artists (AxisArtist) is defined to draw the axis
line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
instance responsible for drawing the left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has the following child artists, which draw ticks, labels, etc.:
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to support them. For example,
the color and markerwidth of ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have a *gridlines* attribute,
which obviously draws grid lines. The gridlines need to be separated
from the axis as some gridlines can never pass any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
"""
    AxisArtistHelper should define the
    following methods with the given APIs. Note that the first *axes* argument
    will be the axes attribute of the caller artist.
# LINE (spinal line?)
def get_line(self, axes):
# path : Path
return path
def get_line_transform(self, axes):
# ...
# trans : transform
return trans
# LABEL
def get_label_pos(self, axes):
# x, y : position
return (x, y), trans
def get_label_offset_transform(self, \
axes,
pad_points, fontprops, renderer,
bboxes,
):
# va : vertical alignment
# ha : horizontal alignment
# a : angle
return trans, va, ha, a
# TICK
def get_tick_transform(self, axes):
return trans
def get_tick_iterators(self, axes):
# iter : iterable object that yields (c, angle, l) where
# c, angle, l is position, tick angle, and label
return iter_major, iter_minor
"""
class _Base(object):
"""
Base class for axis helper.
"""
def __init__(self):
"""
"""
self.delta1, self.delta2 = 0.00001, 0.00001
def update_lim(self, axes):
pass
class Fixed(_Base):
"""
Helper class for a fixed (in the axes coordinate) axis.
"""
_default_passthru_pt = dict(left=(0, 0),
right=(1, 0),
bottom=(0, 0),
top=(0, 1))
def __init__(self,
loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
self._loc = loc
if loc not in ["left", "right", "bottom", "top"]:
raise ValueError("%s" % loc)
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
elif loc in ["bottom", "top"]:
nth_coord = 0
self.nth_coord = nth_coord
super(AxisArtistHelper.Fixed, self).__init__()
self.passthru_pt = self._default_passthru_pt[loc]
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-nth_coord
_verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
# axis line in transAxes
self._path = Path(_verts)
def get_nth_coord(self):
return self.nth_coord
# LINE
def get_line(self, axes):
return self._path
def get_line_transform(self, axes):
return axes.transAxes
# LABEL
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._loc
pos, angle_tangent = dict(left=((0., 0.5), 90),
right=((1., 0.5), 90),
bottom=((0.5, 0.), 0),
top=((0.5, 1.), 0))[loc]
return pos, angle_tangent
# TICK
def get_tick_transform(self, axes):
trans_tick = [axes.get_xaxis_transform(),
axes.get_yaxis_transform()][self.nth_coord]
return trans_tick
class Floating(_Base):
def __init__(self, nth_coord,
value):
self.nth_coord = nth_coord
self._value = value
super(AxisArtistHelper.Floating,
self).__init__()
def get_nth_coord(self):
return self.nth_coord
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
class Fixed(AxisArtistHelper.Fixed):
def __init__(self,
axes, loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
loc, nth_coord)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._loc
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = list(self.passthru_pt) # copy
c[self.nth_coord] = x
# check if the tick point is inside axes
c2 = tr2ax.transform_point(c)
#delta=0.00001
if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class Floating(AxisArtistHelper.Floating):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super(AxisArtistHelperRectlinear.Floating, self).__init__( \
nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[:,fixed_coord] = p[fixed_coord]
return Path(_verts)
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._axis_direction
#angle = dict(left=0,
# right=0,
# bottom=.5*np.pi,
# top=.5*np.pi)[loc]
if self.nth_coord == 0:
angle = 0
else:
angle = 90
_verts = [0.5, 0.5]
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[fixed_coord] = p[fixed_coord]
if not (0. <= _verts[fixed_coord] <= 1.):
return None, None
else:
return _verts, angle
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._axis_direction
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
if self.nth_coord == 0:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
#angle = 90 - 90 * self.nth_coord
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
tr2ax = axes.transData + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = [self._value, self._value]
c[self.nth_coord] = x
c1, c2 = tr2ax.transform_point(c)
if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
def __init__(self):
self._force_update = True
self._old_limits = None
super(GridHelperBase, self).__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._force_update or self._old_limits != (x1, x2, y1, y2):
self._update(x1, x2, y1, y2)
self._force_update = False
self._old_limits = (x1, x2, y1, y2)
def _update(self, x1, x2, y1, y2):
pass
def invalidate(self):
self._force_update = True
def valid(self):
return not self._force_update
def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
return []
def new_gridlines(self, ax):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = GridlinesCollection(None, transform=ax.transData,
colors=rcParams['grid.color'],
linestyles=rcParams['grid.linestyle'],
linewidths=rcParams['grid.linewidth'])
ax._set_artist_props(gridlines)
gridlines.set_grid_helper(self)
ax.axes._set_artist_props(gridlines)
# gridlines.set_clip_path(self.axes.patch)
# set_clip_path need to be deferred after Axes.cla is completed.
# It is done inside the cla.
return gridlines
class GridHelperRectlinear(GridHelperBase):
def __init__(self, axes):
super(GridHelperRectlinear, self).__init__()
self.axes = axes
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None,
):
if axes is None:
warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
axes = self.axes
_helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
if axis_direction is None:
axis_direction = loc
axisline = AxisArtist(axes, _helper, offset=offset,
axis_direction=axis_direction,
)
return axisline
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
axes=None,
):
if axes is None:
warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
axes = self.axes
passthrough_point = (value, value)
transform = axes.transData
_helper = AxisArtistHelperRectlinear.Floating( \
axes, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
def get_gridlines(self, which="major", axis="both"):
"""
return list of gridline coordinates in data coordinates.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = []
if axis in ["both", "x"]:
locs = []
y1, y2 = self.axes.get_ylim()
#if self.axes.xaxis._gridOnMajor:
if which in ["both", "major"]:
locs.extend(self.axes.xaxis.major.locator())
#if self.axes.xaxis._gridOnMinor:
if which in ["both", "minor"]:
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
if axis in ["both", "y"]:
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._gridOnMajor:
#if which in ["both", "major"]:
locs.extend(self.axes.yaxis.major.locator())
if self.axes.yaxis._gridOnMinor:
#if which in ["both", "minor"]:
locs.extend(self.axes.yaxis.minor.locator())
for y in locs:
gridlines.append([[x1, x2], [y, y]])
return gridlines
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start == None and k.stop == None and k.step == None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
helper = kw.pop("grid_helper", None)
self._axisline_on = True
if helper:
self._grid_helper = helper
else:
self._grid_helper = GridHelperRectlinear(self)
super(Axes, self).__init__(*kl, **kw)
self.toggle_axisline(True)
def toggle_axisline(self, b=None):
if b is None:
b = not self._axisline_on
if b:
self._axisline_on = True
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
else:
self._axisline_on = False
for s in self.spines.values():
s.set_visible(True)
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
def _init_axis(self):
super(Axes, self)._init_axis()
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
new_fixed_axis = self.get_grid_helper().new_fixed_axis
for loc in ["bottom", "top", "left", "right"]:
self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
axis_direction=loc)
for axisline in [self._axislines["top"], self._axislines["right"]]:
axisline.label.set_visible(False)
axisline.major_ticklabels.set_visible(False)
axisline.minor_ticklabels.set_visible(False)
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def new_gridlines(self, grid_helper=None):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
if grid_helper is None:
grid_helper = self.get_grid_helper()
gridlines = grid_helper.new_gridlines(self)
return gridlines
def _init_gridlines(self, grid_helper=None):
# It is done inside the cla.
gridlines = self.new_gridlines(grid_helper)
self.gridlines = gridlines
def cla(self):
        # gridlines need to be created before cla() since cla calls grid()
self._init_gridlines()
super(Axes, self).cla()
# the clip_path should be set after Axes.cla() since that's
# when a patch is created.
self.gridlines.set_clip_path(self.axes.patch)
self._init_axis_artists()
def get_grid_helper(self):
return self._grid_helper
def grid(self, b=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
"""
        # there are some discrepancies between the behavior of grid in
        # axes_grid and the original mpl's grid, because axes_grid
        # explicitly sets the visibility of the gridlines.
super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
if not self._axisline_on:
return
if b is None:
if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
b=True
else:
b=False
self.gridlines.set_which(which)
self.gridlines.set_axis(axis)
self.gridlines.set_visible(b)
if len(kwargs):
martist.setp(self.gridlines, **kwargs)
def get_children(self):
if self._axisline_on:
children = list(six.itervalues(self._axislines)) + [self.gridlines]
else:
children = []
children.extend(super(Axes, self).get_children())
return children
def invalidate_grid_helper(self):
self._grid_helper.invalidate()
def new_fixed_axis(self, loc, offset=None):
gh = self.get_grid_helper()
axis = gh.new_fixed_axis(loc,
nth_coord=None,
axis_direction=None,
offset=offset,
axes=self,
)
return axis
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
):
gh = self.get_grid_helper()
axis = gh.new_floating_axis(nth_coord, value,
axis_direction=axis_direction,
axes=self)
return axis
def draw(self, renderer, inframe=False):
if not self._axisline_on:
super(Axes, self).draw(renderer, inframe)
return
orig_artists = self.artists
self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
super(Axes, self).draw(renderer, inframe)
self.artists = orig_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
if not self._axisline_on:
return bb0
bb = [bb0]
for axisline in list(six.itervalues(self._axislines)):
if not axisline.get_visible():
continue
bb.append(axisline.get_tightbbox(renderer))
# if axisline.label.get_visible():
# bb.append(axisline.label.get_window_extent(renderer))
# if axisline.major_ticklabels.get_visible():
# bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
# if axisline.minor_ticklabels.get_visible():
# bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
# if axisline.major_ticklabels.get_visible() or \
# axisline.minor_ticklabels.get_visible():
# bb.append(axisline.offsetText.get_window_extent(renderer))
#bb.extend([c.get_window_extent(renderer) for c in artists \
# if c.get_visible()])
_bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
def __init__(self, *kl, **kw):
super(AxesZero, self).__init__(*kl, **kw)
def _init_axis_artists(self):
super(AxesZero, self)._init_axis_artists()
new_floating_axis = self._grid_helper.new_floating_axis
xaxis_zero = new_floating_axis(nth_coord=0,
value=0.,
axis_direction="bottom",
axes=self)
xaxis_zero.line.set_clip_path(self.patch)
xaxis_zero.set_visible(False)
self._axislines["xzero"] = xaxis_zero
yaxis_zero = new_floating_axis(nth_coord=1,
value=0.,
axis_direction="left",
axes=self)
yaxis_zero.line.set_clip_path(self.patch)
yaxis_zero.set_visible(False)
self._axislines["yzero"] = yaxis_zero
SubplotZero = maxes.subplot_class_factory(AxesZero)
if 0:
#if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = SubplotZero(fig, 1, 1, 1)
fig.add_subplot(ax)
ax.axis["xzero"].set_visible(True)
ax.axis["xzero"].label.set_text("Axis Zero")
for n in ["top", "right"]:
ax.axis[n].set_visible(False)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
plt.draw()
plt.show()
if __name__ == "__main__":
#if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = Subplot(fig, 1, 1, 1)
fig.add_subplot(ax)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")
#ax.axis["bottom"].set_tick_direction("in")
ax.axis["bottom"].set_label("Tk0")
plt.draw()
plt.show()
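# A further usage sketch, kept under "if 0:" like the earlier demo block so it
# never runs on import. It is illustrative only and exercises the tuple and
# slice indexing provided by Axes.AxisDict/SimpleChainedObjects, i.e.
# addressing several AxisArtists in one call; the figure number and data are
# arbitrary.
if 0:
    import matplotlib.pyplot as plt
    fig = plt.figure(2, (4, 3))
    ax = Subplot(fig, 1, 1, 1)
    fig.add_subplot(ax)
    xx = np.arange(0, 2*np.pi, 0.01)
    ax.plot(xx, np.cos(xx))
    # hide two sides at once via tuple indexing
    ax.axis["top", "right"].set_visible(False)
    # chained call on every AxisArtist via slice indexing
    ax.axis[:].major_ticks.set_tick_out(True)
    plt.draw()
    plt.show()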
| apache-2.0 |
joshloyal/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 8 | 4765 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
f3r/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
JWarmenhoven/seaborn | examples/pairgrid_dotplot.py | 3 | 1063 | """
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort_values("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
| bsd-3-clause |
smorante/continuous-goal-directed-actions | demonstration-feature-selection/src/libtools.py | 1 | 5062 | # -*- coding: utf-8 -*-
"""
@author: smorante
"""
import numpy as np
from sklearn import preprocessing
from scipy import stats
import matplotlib.pyplot as plt
def sumAll(demo1, demo2, method):
# print "Shapes demo1,demo2: ", np.array(demo1).shape, " , ", np.array(demo2).shape
if np.array(demo1).shape[0] > 1 and \
np.array(demo2).shape[0] > 1 and \
np.array(demo1).shape[1] == np.array(demo2).shape[1]:
if method=="rows":
# print "[INFO] Collapsing rows"
x = np.array(demo1).sum(axis=0) # zero collaps rows into single row
y = np.array(demo2).sum(axis=0)
elif method=="cols":
# print "[INFO] Collapsing cols"
x = np.array(demo1).sum(axis=1) # zero collaps columns into single column
y = np.array(demo2).sum(axis=1)
else:
print "[ERROR] Unrecognized method for summing!"
return False
elif(np.array(demo1).shape[1] != np.array(demo2).shape[1]):
print "[ERROR] NumberOfColumns(demo1) != NumberOfColumns(demo2) "
return False
else:
print "[ERROR] Unknown problem when summing rows or cols"
return False
    return x, y
def plot_zscores(ALPHA, zscores, xtitle):
fig = plt.figure()
plt.bar(np.arange(len(zscores)), zscores, alpha=0.4, color='b')
plt.xlabel(xtitle, fontsize=30)
plt.xticks( fontsize=20)
plt.xlim(0,len(zscores))
plt.ylabel('Score', fontsize=30)
plt.yticks( fontsize=20)
plt.axhline(y=ALPHA, xmin=0, xmax=1, hold=None, color='red', lw=4, linestyle='--')
plt.legend()
plt.tight_layout()
plt.show()
def normalize(normalization, demoNames):
demons=[]
print "[INFO] Normalization: ", normalization
if normalization == "NONE":
## for demonstration normalization
for elem in demoNames:
tmp = np.loadtxt(elem)
tmp_clean = tmp[:,1:]
demons.append(tmp_clean)
return demons
elif normalization == "MINMAX":
## for demonstration normalization
for elem in demoNames:
tmp = np.loadtxt(elem)
tmp_clean = tmp[:,1:]
tmp_clean = minmaxscale(tmp_clean)
demons.append(tmp_clean)
return demons
elif normalization == "STANDARDIZED":
## for demonstration normalization
for elem in demoNames:
tmp = np.loadtxt(elem)
tmp_clean = tmp[:,1:]
tmp_clean = standardize(tmp_clean)
demons.append(tmp_clean)
return demons
elif normalization == "PHYSICAL":
## for demonstration normalization
for elem in demoNames:
tmp = np.loadtxt(elem)
tmp_clean = tmp[:,1:]
tmp_clean = physical_limits(tmp_clean)
demons.append(tmp_clean)
return demons
elif normalization == "WHOLE-EXPERIMENT":
### for whole experiment normalization
demons_temp =[]
for elem in demoNames:
tmp = np.loadtxt(elem)
tmp_clean = tmp[:,1:]
demons.append(tmp_clean)
demons_temp.extend(tmp_clean.tolist())
ranges = np.max(np.array(demons_temp), axis=0)
print "[INFO] Maximum values per feature: ", ranges
demons=maxscale_precomputed(demons, ranges)
return demons
else:
print "[ERROR] Unrecognized normalization"
return False
def standardize(X):
return preprocessing.scale(X, axis=0)
def minmaxscale(X):
min_max_scaler = preprocessing.MinMaxScaler()
return min_max_scaler.fit_transform(X)
def maxscale_precomputed(X, ranges):
X_scaled = []
for elem in X:
temp= elem/ranges
X_scaled.append(temp)
return X_scaled
def physical_limits(X):
ranges=np.zeros(([13,2])) # one per feature. minus one because timestamp
ranges[0] = [500,3000] # x
ranges[1] = [-2000,2000] # y
ranges[2] = [-1000,1000] # z
ranges[3] = [500,3000] # x
ranges[4] = [-2000,2000] # y
ranges[5] = [-1000,1000] # z
ranges[6] = [-2500,2500] # x
ranges[7] = [-4000,4000] # y
ranges[8] = [-2000,2000] # z
ranges[9] = [0,2500] # x
ranges[10] = [0,4000] # y
ranges[11] = [0,2000] # z
ranges[12] = [0,5120] # euclidean
    X_scaled = np.zeros(X.shape)  # scaled copy, same shape as X
for i in range(X.shape[1]): # number of columns
X_scaled[:,i] = (X[:,i] - ranges[i,0] ) / ( ranges[i,1] - ranges[i,0])
return X_scaled
def get_outlier(points, thresh=2):
if len(points.shape) == 1:
points = points[:,None]
results = stats.zscore(points)
    return results.flatten() > thresh, results
| mit |
tammoippen/nest-simulator | pynest/examples/hh_phaseplane.py | 9 | 4973 | # -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (hh_psc_alpha). The dynamics are investigated in the V-n space (see remark
below). A constant DC can be specified and its influence on the nullclines
can be studied.
REMARK
To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, m and h, to
constant values (m_eq and h_eq).
'''
import nest
from matplotlib import pyplot as plt
amplitude = 100. # Set externally applied current amplitude in pA
dt = 0.1 # simulation step length [ms]
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
nest.SetKernelStatus({'resolution': dt})
neuron = nest.Create('hh_psc_alpha')
# Numerically obtain equilibrium state
nest.Simulate(1000)
m_eq = nest.GetStatus(neuron)[0]['Act_m']
h_eq = nest.GetStatus(neuron)[0]['Act_h']
nest.SetStatus(neuron, {'I_e': amplitude}) # Apply external current
# Scan state space
print('Scanning phase space')
V_new_vec = []
n_new_vec = []
# x will contain the phase-plane data as a vector field
x = []
count = 0
for V in range(-100, 42, 2):
n_V = []
n_n = []
for n in range(10, 81):
# Set V_m and n
nest.SetStatus(neuron, {'V_m': V*1.0, 'Inact_n': n/100.0,
'Act_m': m_eq, 'Act_h': h_eq})
# Find state
V_m = nest.GetStatus(neuron)[0]['V_m']
Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
# Simulate a short while
nest.Simulate(dt)
# Find difference between new state and old state
V_m_new = nest.GetStatus(neuron)[0]['V_m'] - V*1.0
Inact_n_new = nest.GetStatus(neuron)[0]['Inact_n'] - n/100.0
# Store in vector for later analysis
n_V.append(abs(V_m_new))
n_n.append(abs(Inact_n_new))
x.append([V_m, Inact_n, V_m_new, Inact_n_new])
if count % 10 == 0:
# Write updated state next to old state
print('')
print('Vm: \t', V_m)
print('new Vm:\t', V_m_new)
print('Inact_n:', Inact_n)
print('new Inact_n:', Inact_n_new)
count += 1
# Store in vector for later analysis
V_new_vec.append(n_V)
n_new_vec.append(n_n)
# Set state for AP generation
nest.SetStatus(neuron, {'V_m': -34., 'Inact_n': 0.2,
'Act_m': m_eq, 'Act_h': h_eq})
print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = []
for i in range(1, 1001):
# Find state
V_m = nest.GetStatus(neuron)[0]['V_m']
Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
if i % 10 == 0:
# Write new state next to old state
print('Vm: \t', V_m)
print('Inact_n:', Inact_n)
ap.append([V_m, Inact_n])
# Simulate again
nest.SetStatus(neuron, {'Act_m': m_eq, 'Act_h': h_eq})
nest.Simulate(dt)
# Make analysis
print('')
print('Plot analysis')
V_matrix = [list(x) for x in zip(*V_new_vec)]
n_matrix = [list(x) for x in zip(*n_new_vec)]
n_vec = [x/100. for x in range(10, 81)]
V_vec = [x*1. for x in range(-100, 42, 2)]
nullcline_V = []
nullcline_n = []
print('Searching nullclines')
for i in range(0, len(V_vec)):
index = V_matrix[:][i].index(min(V_matrix[:][i]))
if index != 0 and index != len(n_vec):
nullcline_V.append([V_vec[i], n_vec[index]])
index = n_matrix[:][i].index(min(n_matrix[:][i]))
if index != 0 and index != len(n_vec):
nullcline_n.append([V_vec[i], n_vec[index]])
print('Plotting vector field')
factor = 0.1
for i in range(0, count, 3):
plt.plot([x[i][0], x[i][0] + factor*x[i][2]],
[x[i][1], x[i][1] + factor*x[i][3]], color=[0.6, 0.6, 0.6])
plt.plot(nullcline_V[:][0], nullcline_V[:][1], linewidth=2.0)
plt.plot(nullcline_n[:][0], nullcline_n[:][1], linewidth=2.0)
plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])
plt.plot(ap[:][0], ap[:][1], color='black', linewidth=1.0)
plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Inactivation variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')
plt.show()
| gpl-2.0 |
lail3344/sms-tools | lectures/05-Sinusoidal-model/plots-code/synthesis-window-2.py | 22 | 2038 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
H = Ns/4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y = fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H)
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5, lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y; size = Ns = 512 (Blackman-Harris)")
plt.subplot(3,3,4)
plt.plot(np.arange(-hNs,hNs), bh/max(bh), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs, hNs,0,1])
plt.title("Blackman-Harris")
plt.subplot(3,3,5)
plt.plot(np.arange(-hNs/2,hNs/2), ow/max(ow), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs/2, hNs/2,0,1])
plt.title("triangular")
plt.subplot(3,3,6)
plt.plot(np.arange(-hNs/2,hNs/2), sw[hNs-H:hNs+H]/max(sw), 'k', alpha=.9, lw=1.5)
plt.axis([-hNs, hNs,0,1])
plt.title("triangular / Blackman-Harris")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b', lw=1.5)
plt.plot(np.arange(-hNs/2,hNs/2), max(y)*ow/max(ow), 'k', alpha=.5, lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window-2.png')
plt.show()
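
# Quick overlap-add sanity check (not part of the original script): with hop
# H = Ns/4, shifted copies of the triangular synthesis window should sum to a
# constant, which is what makes the triangular/Blackman-Harris re-windowing
# above work in the synthesis stage.
ola = np.zeros(6*H)
for m in range(5):
    ola[m*H:m*H + 2*H] += ow            # overlap-add shifted triangular windows
print("max deviation from 1 in the fully overlapped region: %g"
      % np.max(np.abs(ola[2*H:3*H] - 1)))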
| agpl-3.0 |
zoeyangyy/event-extraction | remote/model.py | 1 | 4463 | # coding: utf-8
import mxnet as mx
from mxnet import gluon
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet.gluon import nn
import h5py
import numpy as np
import pandas as pd
import getopt,sys
from sklearn.model_selection import train_test_split
opts, args = getopt.getopt(sys.argv[1:], "t:", ["type="])
trainType = []
for op, value in opts:
if op == "--type":
trainType = value.split(',')
print(trainType)
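# Example invocation (feature-set names must match the keys stored in
# features.h5, e.g. vgg / resnet / densenet / inception):
#   python model.py --type=resnet,densenet,inception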
ctx = mx.gpu()
def accuracy(output, labels):
return nd.mean(nd.argmax(output, axis=1) == labels).asscalar()
def evaluate(net, data_iter):
    loss, acc = 0., 0.
steps = len(data_iter)
for data, label in data_iter:
data, label = data.as_in_context(ctx), label.as_in_context(ctx)
output = net(data)
acc += accuracy(output, label)
loss += nd.mean(softmax_cross_entropy(output, label)).asscalar()
return loss/steps, acc/steps
names = locals()
with h5py.File('features.h5', 'r') as f:
names['features_vgg'] = np.array(f['vgg'])
features_resnet = np.array(f['resnet'])
names['features_densenet'] = np.array(f['densenet'])
features_inception = np.array(f['inception_new'])
labels = np.array(f['labels'])
names['features_resnet'] = features_resnet.reshape(features_resnet.shape[:2])
names['features_inception'] = features_inception.reshape(features_inception.shape[:2])
for index, type in enumerate(trainType):
if index == 0:
features = names['features_'+type]
else:
features = np.concatenate([features, names['features_'+type]], axis=-1)
# features = np.concatenate([features_resnet, features_densenet, features_inception], axis=-1)
X_train, X_val, y_train, y_val = train_test_split(features, labels, test_size=0.2)
dataset_train = gluon.data.ArrayDataset(nd.array(X_train), nd.array(y_train))
dataset_val = gluon.data.ArrayDataset(nd.array(X_val), nd.array(y_val))
batch_size = 128
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
data_iter_val = gluon.data.DataLoader(dataset_val, batch_size)
net = nn.Sequential()
with net.name_scope():
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dropout(0.5))
net.add(nn.Dense(120))
net.initialize(ctx=ctx)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(),
'adam', {'learning_rate': 1e-4, 'wd': 1e-5})
for epoch in range(50):
train_loss = 0.
train_acc = 0.
steps = len(data_iter_train)
for data, label in data_iter_train:
data, label = data.as_in_context(ctx), label.as_in_context(ctx)
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
trainer.step(batch_size)
train_loss += nd.mean(loss).asscalar()
train_acc += accuracy(output, label)
val_loss, val_acc = evaluate(net, data_iter_val)
print("Epoch %d. loss: %.4f, acc: %.2f%%, val_loss %.4f, val_acc %.2f%%" % (
epoch+1, train_loss/steps, train_acc/steps*100, val_loss, val_acc*100))
with h5py.File('features_pre.h5', 'r') as f:
names['features_vgg_test'] = np.array(f['vgg'])
features_resnet_test = np.array(f['resnet'])
names['features_densenet_test'] = np.array(f['densenet'])
features_inception_test = np.array(f['inception'])
names['features_resnet_test'] = features_resnet_test.reshape(features_resnet_test.shape[:2])
names['features_inception_test'] = features_inception_test.reshape(features_inception_test.shape[:2])
for index, type in enumerate(trainType):
if index == 0:
features_test = names['features_'+type+'_test']
else:
features_test = np.concatenate([features_test, names['features_'+type+'_test']], axis=-1)
# features_test = np.concatenate([features_resnet_test, features_densenet_test, features_inception_test], axis=-1)
output = nd.softmax(net(nd.array(features_test).as_in_context(ctx))).asnumpy()
# with open('result.txt', 'a') as file:
# file.write("train type = %s. loss: %.4f, acc: %.2f%%, val_loss %.4f, val_acc %.2f%%\n" % (
# trainType, train_loss/steps, train_acc/steps*100, val_loss, val_acc*100))
df = pd.read_csv('sample_submission-pre.csv')
for i, c in enumerate(df.columns[1:]):
df[c] = output[:, i]
# df.to_csv('pred-{}.csv'.format('-'.join(trainType)), index=None)
df.to_csv('pred-{}.csv'.format('test'), index=None)
| mit |
biocore-ntnu/pyranges | pyranges/methods/nearest.py | 1 | 4757 | import pandas as pd
from .join import _both_dfs
from .sort import sort_one_by_one
from sorted_nearest import (nearest_previous_nonoverlapping,
nearest_next_nonoverlapping,
nearest_nonoverlapping)
def _insert_distance(ocdf, dist, suffix):
if "Distance" not in ocdf:
distance_column_name = "Distance"
elif "Distance" + suffix not in ocdf:
distance_column_name = "Distance" + suffix
else:
i = 1
while "Distance" + str(i) in ocdf:
i += 1
distance_column_name = "Distance" + str(i)
ocdf.insert(ocdf.shape[1], distance_column_name,
pd.Series(dist, index=ocdf.index).fillna(-1).astype(int))
return ocdf
def _overlapping_for_nearest(scdf, ocdf, suffix):
nearest_df = pd.DataFrame(columns="Chromosome Start End Strand".split())
scdf2, ocdf2 = _both_dfs(scdf, ocdf, how="first")
if not ocdf2.empty:
        idxs = scdf2.index
        original_idx = scdf.index.copy(deep=True)
missing_idxs = ~original_idx.isin(idxs)
missing_overlap = scdf.index[missing_idxs]
df_to_find_nearest_in = scdf.reindex(missing_overlap)
odf = ocdf.reindex(ocdf2.index)
odf.index = idxs
sdf = scdf.reindex(idxs)
nearest_df = sdf.join(odf, rsuffix=suffix)
nearest_df = _insert_distance(nearest_df, 0, suffix)
else:
df_to_find_nearest_in = scdf
return nearest_df, df_to_find_nearest_in
def _next_nonoverlapping(left_ends, right_starts, right_indexes):
left_ends = left_ends.sort_values()
right_starts = right_starts.sort_values()
r_idx, dist = nearest_next_nonoverlapping(
left_ends.values - 1, right_starts.values, right_indexes)
r_idx = pd.Series(r_idx, index=left_ends.index).sort_index().values
dist = pd.Series(dist, index=left_ends.index).sort_index().values
return r_idx, dist
def _previous_nonoverlapping(left_starts, right_ends):
left_starts = left_starts.sort_values()
right_ends = right_ends.sort_values()
r_idx, dist = nearest_previous_nonoverlapping(
left_starts.values, right_ends.values - 1, right_ends.index.values)
r_idx = pd.Series(r_idx, index=left_starts.index).sort_index().values
dist = pd.Series(dist, index=left_starts.index).sort_index().values
return r_idx, dist
def _nearest(scdf, ocdf, **kwargs):
if scdf.empty or ocdf.empty:
return None
overlap = kwargs["overlap"]
how = kwargs["how"]
suffix = kwargs["suffix"]
if how == "upstream":
strand = scdf.Strand.iloc[0]
how = {"+": "previous", "-": "next"}[strand]
elif how == "downstream":
strand = scdf.Strand.iloc[0]
how = {"+": "next", "-": "previous"}[strand]
ocdf = ocdf.reset_index(drop=True)
if overlap:
nearest_df, df_to_find_nearest_in = _overlapping_for_nearest(
scdf, ocdf, suffix)
else:
df_to_find_nearest_in = scdf
if not df_to_find_nearest_in.empty:
df_to_find_nearest_in = sort_one_by_one(df_to_find_nearest_in, "Start",
"End")
ocdf = sort_one_by_one(ocdf, "Start", "End")
df_to_find_nearest_in.index = pd.Index(
range(len(df_to_find_nearest_in)))
if how == "next":
r_idx, dist = _next_nonoverlapping(df_to_find_nearest_in.End,
ocdf.Start, ocdf.index.values)
elif how == "previous":
r_idx, dist = _previous_nonoverlapping(df_to_find_nearest_in.Start,
ocdf.End)
else:
previous_r_idx, previous_dist = _previous_nonoverlapping(
df_to_find_nearest_in.Start, ocdf.End)
next_r_idx, next_dist = _next_nonoverlapping(
df_to_find_nearest_in.End, ocdf.Start, ocdf.index.values)
r_idx, dist = nearest_nonoverlapping(previous_r_idx, previous_dist,
next_r_idx, next_dist)
ocdf = ocdf.reindex(r_idx)
ocdf.index = df_to_find_nearest_in.index
ocdf = _insert_distance(ocdf, dist, suffix)
r_idx = pd.Series(r_idx, index=ocdf.index)
df_to_find_nearest_in = df_to_find_nearest_in.drop(
r_idx.loc[r_idx == -1].index)
df = df_to_find_nearest_in.join(ocdf, rsuffix=suffix)
if overlap and "df" in locals() and not df.empty and not nearest_df.empty:
df = pd.concat([nearest_df, df], sort=False)
elif overlap and not nearest_df.empty:
df = nearest_df
df = df.drop("Chromosome" + suffix, axis=1)
return df
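
# Minimal call sketch (illustration only; assumes scdf/ocdf are single-
# chromosome pandas DataFrames with Chromosome/Start/End (and Strand for the
# stranded "how" values) columns, as prepared by the pyranges dispatcher):
#   df = _nearest(scdf, ocdf, overlap=True, how=None, suffix="_b")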
| mit |
zentol/flink | flink-python/pyflink/table/tests/test_pandas_conversion.py | 5 | 7701 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
from pandas.util.testing import assert_frame_equal
from pyflink.table.types import DataTypes, Row
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase, PyFlinkStreamTableTestCase
class PandasConversionTestBase(object):
@classmethod
def setUpClass(cls):
super(PandasConversionTestBase, cls).setUpClass()
cls.data = [(1, 1, 1, 1, True, 1.1, 1.2, 'hello', bytearray(b"aaa"),
decimal.Decimal('1000000000000000000.01'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2])),
(2, 2, 2, 2, False, 2.1, 2.2, 'world', bytearray(b"bbb"),
decimal.Decimal('1000000000000000000.02'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2]))]
cls.data_type = DataTypes.ROW(
[DataTypes.FIELD("f1", DataTypes.TINYINT()),
DataTypes.FIELD("f2", DataTypes.SMALLINT()),
DataTypes.FIELD("f3", DataTypes.INT()),
DataTypes.FIELD("f4", DataTypes.BIGINT()),
DataTypes.FIELD("f5", DataTypes.BOOLEAN()),
DataTypes.FIELD("f6", DataTypes.FLOAT()),
DataTypes.FIELD("f7", DataTypes.DOUBLE()),
DataTypes.FIELD("f8", DataTypes.STRING()),
DataTypes.FIELD("f9", DataTypes.BYTES()),
DataTypes.FIELD("f10", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("f11", DataTypes.DATE()),
DataTypes.FIELD("f12", DataTypes.TIME()),
DataTypes.FIELD("f13", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("f14", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("f15", DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.STRING()),
DataTypes.FIELD("c", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("d", DataTypes.ARRAY(DataTypes.INT()))]))])
cls.pdf = cls.create_pandas_data_frame()
@classmethod
def create_pandas_data_frame(cls):
data_dict = {}
for j, name in enumerate(cls.data_type.names):
data_dict[name] = [cls.data[i][j] for i in range(len(cls.data))]
# need convert to numpy types
import numpy as np
data_dict["f1"] = np.int8(data_dict["f1"])
data_dict["f2"] = np.int16(data_dict["f2"])
data_dict["f3"] = np.int32(data_dict["f3"])
data_dict["f4"] = np.int64(data_dict["f4"])
data_dict["f6"] = np.float32(data_dict["f6"])
data_dict["f7"] = np.float64(data_dict["f7"])
data_dict["f15"] = [row.as_dict() for row in data_dict["f15"]]
import pandas as pd
return pd.DataFrame(data=data_dict,
columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'f10', 'f11', 'f12', 'f13', 'f14', 'f15'])
class PandasConversionTests(PandasConversionTestBase):
def test_from_pandas_with_incorrect_schema(self):
fields = self.data_type.fields.copy()
fields[0], fields[7] = fields[7], fields[0] # swap str with tinyint
wrong_schema = DataTypes.ROW(fields) # should be DataTypes.STRING()
with self.assertRaisesRegex(Exception, "Expected a string.*got int8"):
self.t_env.from_pandas(self.pdf, schema=wrong_schema)
def test_from_pandas_with_names(self):
# skip decimal as currently only decimal(38, 18) is supported
pdf = self.pdf.drop(['f10', 'f11', 'f12', 'f13', 'f14', 'f15'], axis=1)
new_names = list(map(str, range(len(pdf.columns))))
table = self.t_env.from_pandas(pdf, schema=new_names)
self.assertEqual(new_names, table.get_schema().get_field_names())
table = self.t_env.from_pandas(pdf, schema=tuple(new_names))
self.assertEqual(new_names, table.get_schema().get_field_names())
def test_from_pandas_with_types(self):
new_types = self.data_type.field_types()
new_types[0] = DataTypes.BIGINT()
table = self.t_env.from_pandas(self.pdf, schema=new_types)
self.assertEqual(new_types, table.get_schema().get_field_data_types())
table = self.t_env.from_pandas(self.pdf, schema=tuple(new_types))
self.assertEqual(new_types, table.get_schema().get_field_data_types())
class PandasConversionITTests(PandasConversionTestBase):
def test_from_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type, 5)
self.assertEqual(self.data_type, table.get_schema().to_row_data_type())
table = table.filter("f1 < 2")
table_sink = source_sink_utils.TestAppendSink(
self.data_type.field_names(),
self.data_type.field_types())
self.t_env.register_table_sink("Results", table_sink)
table.insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual,
["1,1,1,1,true,1.1,1.2,hello,[97, 97, 97],"
"1000000000000000000.010000000000000000,2014-09-13,01:00:01,"
"1970-01-01 00:00:00.123,[hello, 中文],1,hello,"
"1970-01-01 00:00:00.123,[1, 2]"])
def test_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
result_pdf = table.to_pandas()
self.assertEqual(2, len(result_pdf))
assert_frame_equal(self.pdf, result_pdf)
def test_empty_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
pdf = table.filter("f1 < 0").to_pandas()
self.assertTrue(pdf.empty)
class StreamPandasConversionTests(PandasConversionITTests,
PyFlinkStreamTableTestCase):
pass
class BlinkBatchPandasConversionTests(PandasConversionTests,
PandasConversionITTests,
PyFlinkBlinkBatchTableTestCase):
pass
class BlinkStreamPandasConversionTests(PandasConversionITTests,
PyFlinkBlinkStreamTableTestCase):
pass
| apache-2.0 |
TinyOS-Camp/DDEA-DEV | Archive/[14_09_12] DDEA_example_code/data_tools.py | 7 | 17972 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 16 11:11:01 2014
@author: deokwoo
"""
from __future__ import division  # To force floating point division
import numpy as np
import matplotlib.pyplot as plt
#from datetime import datetime
import datetime as dt
import calendar
from numpy.linalg import norm
from shared_constants import *
import re
import mytool as mt
import pandas
import uuid
# Explicit imports needed by convert_time_to_state/build_time_states below
# (they may also be pulled in indirectly via shared_constants):
import pytz
from dateutil import tz
###############################################################################
# Application Functions
###############################################################################
###############################################################################
# Plotting tool
###############################################################################
# example
#plotting_data(['GW2.CG_PHASE2_ACTIVE_POWER_M'],data_dict,time_slots[50:100])
def plotting_data(plot_list,data_dict,time_slots,opt='avg'):
# Month indicator
time_mat=build_time_states(time_slots)
time_mat_NAMES=['MTH','WD','HR']
month_idx=0; weekday_idx=1; hour_idx=2
num_col=int(np.ceil(np.sqrt(len(plot_list))))
num_row=num_col
time_mn_diff=np.diff(time_mat[:,month_idx])
m_label_idx=time_mn_diff.nonzero()[0]; m_label_str=[]
for m_num in time_mat[m_label_idx,month_idx]:
m_label_str.append(monthDict[m_num])
time_wk_diff=np.diff(time_mat[:,weekday_idx])
w_label_idx=time_wk_diff.nonzero()[0]; w_label_str=[]
for w_num in time_mat[w_label_idx,weekday_idx]:
w_label_str.append(weekDict[int(w_num)])
sample_slot_idx=[data_dict['time_slots'].index(dt_val) for dt_val in time_slots]
for k,sensor in enumerate(plot_list):
num_samples=[]; mean_samples=[]
for i,(t,samples) in enumerate(zip(time_slots,data_dict[sensor][1][sample_slot_idx])):
#import pdb;pdb.set_trace()
num_samples.append(len(samples))
mean_samples.append(np.mean(samples))
if opt=='sd' or opt=='all':
plt.figure('Sampling Density')
plt.subplot(num_col, num_row,k+1)
plt.plot(time_slots,num_samples)
plt.title(sensor,fontsize=8)
#==============================================================================
# plt.xticks(fontsize=8)
#==============================================================================
plt.yticks(fontsize=8)
plt.ylabel('# Samples/Hour',fontsize=8)
if k<len(plot_list)-1:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
if opt=='avg' or opt=='all':
plt.figure('Hourly Average')
plt.subplot(num_col, num_row,k+1)
plt.plot(time_slots,mean_samples)
plt.title(sensor,fontsize=8)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.ylabel('Avg Val/Hour',fontsize=8)
if k<len(plot_list)-1:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
#plt.xticks(w_label_idx.tolist(),w_label_str,fontsize=8)
#plt.text(m_label_idx, np.max(num_samples)*0.8, m_label_str, fontsize=12)
if opt=='si' or opt=='all':
plt.figure('Sampling Intervals')
t_secs_diff=np.diff(data_dict[sensor][2][0])
plt.subplot(num_col, num_row,k+1)
plt.plot(t_secs_diff)
plt.title(sensor,fontsize=8)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.ylabel('Samping Interval (secs)',fontsize=8)
if k<len(plot_list)-1:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
print ' End of Plotting'
def daterange(start, stop, step=dt.timedelta(days=1), inclusive=False):
# inclusive=False to behave like range by default
if step.days > 0:
while start < stop:
yield start
start = start + step
# not +=! don't modify object passed in if it's mutable
# since this function is not restricted to
# only types from datetime module
elif step.days < 0:
while start > stop:
yield start
start = start + step
if inclusive and start == stop:
yield start
# Convert a unix time u to a datetime object d, and vice versa
#def unix_to_dtime(u): return dt.datetime.utcfromtimestamp(u)
def unix_to_dtime(u):
if len(u)==1:
return dt.datetime.utcfromtimestamp(u[0])
elif len(u)>1:
return [dt.datetime.utcfromtimestamp(x) for x in u]
else:
raise NameError('length of vector must be greater 1')
def dtime_to_unix(d):
if len(d)==1:
return calendar.timegm(d[0].timetuple())
elif len(d)>1:
return [calendar.timegm(ds.timetuple()) for ds in d]
else:
raise NameError('length of vector must be greater 1')
#return calendar.timegm(d.timetuple())
def find_norm_dist_matrix(X_INPUT):
#SIMM_MAT=np.zeros([X_INPUT.shape[1],X_INPUT.shape[1]])
#if X_INPUT.shape[1] > X_INPUT.shape[0]:
# print 'WARNING: num of samples are smaller than num of sensors'
DIST_MAT=np.zeros([X_INPUT.shape[1],X_INPUT.shape[1]])
for i in range(X_INPUT.shape[1]):
for j in range(X_INPUT.shape[1]):
sample1=X_INPUT[:,i].copy()
sample2=X_INPUT[:,j].copy()
temp_dist=norm(sample1-sample2)
DIST_MAT[i,j]=temp_dist
#SIMM_MAT[i,j] = 2-temp_dist
return DIST_MAT
def outlier_detect(val,err_rate=10,sgm_bnd=3):
min_num_samples=10
val_len=len(val)
val_sort=sorted(val)
if err_rate<0 or err_rate>100:
raise NameError('error rate must be between 0 and 100')
start_idx=int(val_len*(err_rate/2)/100)
end_idx=int(val_len-val_len*(err_rate/2)/100)
#import pdb;pdb.set_trace()
#print 'start_idx: ',start_idx,'end_idx: ',end_idx
if end_idx-start_idx>min_num_samples:
mean_val=np.mean(val_sort[start_idx:end_idx])
std_val=np.std(val_sort[start_idx:end_idx])
min_val=np.min(val_sort[start_idx:end_idx])
max_val=np.max(val_sort[start_idx:end_idx])
else:
return []
#print 'min_val: ',min_val,'max_val: ',max_val,'sgm_bnd: ',sgm_bnd
#val_bnd_high=mean_val+sgm_bnd*std_val
val_bnd_high=max_val+sgm_bnd*std_val
#val_bnd_low=mean_val-sgm_bnd*std_val
val_bnd_low=min_val-sgm_bnd*std_val
#print 'val_bnd_low: ',val_bnd_low,'val_bnd_high: ',val_bnd_high
return np.nonzero((val_bnd_high<val ) | (val_bnd_low>val))[0]
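# Usage sketch (synthetic data, illustration only): inject two gross outliers
# into ~N(0,1) samples and recover their indices.
#   >>> vals = np.concatenate([np.random.randn(200), [25.0, -30.0]])
#   >>> outlier_detect(vals, err_rate=10, sgm_bnd=3)
#   array([200, 201])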
# Average Sampling Interval
# Interploated data
def fast_nearest_interp(xi, x, y):
# Assumes that x is monotonically increasing!!.
# Shift x points to centers
spacing = np.diff(x) / 2
x = x + np.hstack([spacing, spacing[-1]])
# Append the last point in y twice for ease of use
y = np.hstack([y, y[-1]])
return y[np.searchsorted(x, xi)]
###
# Convert time (datetime object or utc time) to state [month,weekday,hour]
# if time is datetime ojbect, we assume it is local (Helsinki) time
# otherwise, it is UTC time, and we give the corresponding [month,wweekday,hour]
# in the local time
###
def convert_time_to_state(ts,readable_format=False,from_zone='Europe/Helsinki',to_zone='Europe/Helsinki'):
time_state = []
# If the timestamp is datetime object,
# we assume it is already in local time (Helsinki)
# If not: we assume it is utc time, and we need to convert it to
# local time zone (Helsinki)
if type(ts) == dt.datetime:
local_dt = ts
else:
#local_dt = dt.datetime.fromtimestamp(ts).replace(tzinfo=tz.gettz('UTC')).astimezone(pytz.timezone(zone))
local_dt = dt.datetime.utcfromtimestamp(ts).replace(tzinfo=tz.gettz(from_zone)).astimezone(pytz.timezone(to_zone))
if not readable_format:
time_state = [local_dt.month-1, local_dt.weekday(), local_dt.hour]
else:
time_state = [monthDict[local_dt.month-1],weekDict[local_dt.weekday()], str(local_dt.hour) + 'h']
return time_state
###
# Construct time state matrix Nx3 [Month,Weekday,Hour] from the list of time
# ts_list, where N = len(ts_list)
# If the item in the list is datetime object, we assume it is already in local time (Helsinki)
# Otherwise, we assume the list item is in UTC time
# Setting readable_format to True will result in the time in understandable format
###
def build_time_states(ts_list,readable_format=False,from_zone='Europe/Helsinki',to_zone='Europe/Helsinki'):
time_mat = []
# if not readable_format:
for ts in ts_list:
time_state = convert_time_to_state(ts,readable_format=readable_format,from_zone=from_zone,to_zone=to_zone)
time_mat.append(time_state)
return np.array(time_mat)
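# Usage sketch (illustration only; the readable labels are taken from
# monthDict/weekDict in shared_constants, so the exact strings depend on those
# tables):
#   >>> build_time_states([dt.datetime(2014, 4, 16, 11, 0)], readable_format=True)
#   array([['Apr', 'Wed', '11h']], ...)   # one [month, weekday, hour] row per time slot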
def pair_in_idx(a,b=[],FLATTEN=True):
pair_set=[]
if len(b)==0:
for idx1 in range(len(a)):
for idx2 in range(idx1+1,len(a)):
if FLATTEN==True:
if (isinstance(a[idx1],list)==True) and (isinstance(a[idx2],list)==True):
pair_set.append([a[idx1]+a[idx2]][0])
elif isinstance(a[idx1],list)==True and isinstance(a[idx2],list)==False:
pair_set.append([list([a[idx2]])+a[idx1]][0])
elif isinstance(a[idx1],list)==False and isinstance(a[idx2],list)==True:
pair_set.append([a[idx2]+list([a[idx1]])][0])
else:
pair_set.append([a[idx1],a[idx2]])
else:
pair_set.append([a[idx1],a[idx2]])
else:
for idx1 in a:
for idx2 in b:
if FLATTEN==True:
if (isinstance(idx1,list)==True) and (isinstance(idx2,list)==True):
pair_set.append([idx1+idx2][0])
elif isinstance(idx1,list)==True and isinstance(idx2,list)==False:
pair_set.append([idx1+list([idx2])][0])
elif isinstance(idx1,list)==False and isinstance(idx2,list)==True:
pair_set.append([list([idx1])+idx2][0])
else:
pair_set.append([idx1,idx2])
else:
pair_set.append([idx1,idx2])
return pair_set
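# Examples: unordered pairs within one list, or the cross product of two lists.
#   >>> pair_in_idx([1, 2, 3])
#   [[1, 2], [1, 3], [2, 3]]
#   >>> pair_in_idx([1, 2], ['a', 'b'])
#   [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]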
def plot_compare_sensors(sensor_names,X_Time,X_Feature,X_names):
num_sensors=len(sensor_names)
#sensor_name=data_used[k]
fig = plt.figure('Compare')
fig.suptitle('Compare')
for k,sensor_name in enumerate(sensor_names):
plt.subplot(num_sensors,1,k+1);
plt.plot(X_Time,X_Feature[:,X_names.index(sensor_name)])
plt.title(sensor_name)
plt.get_current_fig_manager().window.showMaximized()
def plot_compare_states(x_idx,data_dict,X_Time,X_Feature,X_STATE,X_names):
if X_STATE.shape!=X_Feature.shape:
raise NameError('the size of state and feature matrix must be same')
if (X_STATE.shape[0]!=X_Time.shape[0]):
raise NameError('the row length of state /feature matrix and time array must be same')
if (X_STATE.shape[1]!=len(X_names)):
raise NameError('the column length of state and name array must be same')
sensor_name=X_names[x_idx]
fig = plt.figure('Regualar Event Classification')
fig.suptitle('Regualar Event Classification');
plt.subplot(3,1,1);
plt.plot(unix_to_dtime(data_dict[sensor_name][2][0]),data_dict[sensor_name][2][1])
plt.ylabel('Power, KWatt')
plt.title(sensor_name+' - Measurements');
plt.subplot(3,1,2);
plt.plot(X_Time,X_Feature[:,x_idx]);
plt.title(X_names[x_idx]+' - Hourly Average');
plt.ylabel('Normalized Measurement')
plt.subplot(3,1,3);
low_peak_idx=np.nonzero(X_STATE[:,x_idx]==-1)[0]
no_peak_idx=np.nonzero(X_STATE[:,x_idx]==0)[0]
high_peak_idx=np.nonzero(X_STATE[:,x_idx]==1)[0]
plt.plot(X_Time[low_peak_idx],X_STATE[low_peak_idx,x_idx],'rv');
plt.plot(X_Time[high_peak_idx],X_STATE[high_peak_idx,x_idx],'b^');
plt.plot(X_Time[no_peak_idx],X_STATE[no_peak_idx,x_idx],'g.');
plt.plot(X_Time,X_STATE[:,x_idx]);
plt.title(sensor_name+' - Classified States ');
plt.ylabel('States'); plt.xlabel('Dates'); plt.ylim([-1.2,1.2])
plt.yticks([-1, 0, 1], ['Low Peak', 'No Peak', 'High Peak'])
plt.get_current_fig_manager().window.showMaximized()
#time.sleep(3)
#fig.savefig(fig_dir+'Reg_Event_Classification_'+input_names[k]+'.png')
def check_data_type(values):
num_samples=len(values)
num_values=len(set(values))
if num_values==0:
return None
if num_samples>1000:
if num_values<MIN_NUM_VAL_FOR_FLOAT:
data_type=INT_TYPE
else:
data_type=FLOAT_TYPE
else:
comp_ratio=num_samples/num_values
if comp_ratio>100:
data_type=INT_TYPE
else:
data_type=FLOAT_TYPE
return data_type
def grep(pattern,l):
expr = re.compile(pattern)
idx_list = [idx for idx in range(len(l)) if expr.search(l[idx])]
#ret = [(idx,elem) for elem in l if expr.search(elem)]
return list(set(idx_list))
class obj(object):
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [obj(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, obj(b) if isinstance(b, dict) else b)
def remove_dot(pname_):
if isinstance(pname_,list)==True:
pname_list=[]
for name_ in pname_:
try:
blank_idx=name_.index('.')
name_=name_.replace('.','_')
except:
pass
try:
blank_idx=name_.index(' ')
name_=name_.replace(' ','_')
except:
pass
if name_[0].isdigit():
name_='a_'+name_
pname_list.append(name_)
return pname_list
else:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
try:
blank_idx=pname_.index(' ')
pname_=pname_.replace(' ','_')
except:
pass
if pname_[0].isdigit():
pname_='a_'+pname_
return pname_
# cause sensor names
def get_data_set(f_names,start_t=[],end_t=[]):
s_names={}
for s_name in f_names:
filename=DATA_DIR+s_name+FL_EXT
data = mt.loadObjectBinary(filename)
sensor_val = data["value"]
time_val = data["ts"]
if (start_t!=[]) and (end_t!=[]):
temp_t=np.array([ t_[0] for t_ in time_val])
s_t_idx=np.nonzero((temp_t>start_t) & (temp_t<end_t))[0]
time_val=np.array(time_val)[s_t_idx]
sensor_val=np.array(sensor_val)[s_t_idx]
sensor_dtime=[time_val_[0] for time_val_ in time_val]
temp_obj=obj({'time':sensor_dtime,'val':sensor_val})
s_names.update({remove_dot(s_name):temp_obj})
return obj(s_names)
def plot_data_x(data_x,stype='raw',smark='-',fontsize='small',xpos=0.5):
plt.ioff()
fig=plt.figure(figsize=(20.0,10.0))
sensor_names_x=data_x.__dict__.keys()
num_plots=len(sensor_names_x)
for k ,key in enumerate(sensor_names_x):
plt.subplot(num_plots,1,k+1)
if stype=='diff':
t_=data_x.__dict__[key].time[1:]
val_=abs(np.diff(data_x.__dict__[key].val))
plt.title(key+'- Differential',fontsize=fontsize,x=xpos)
else:
t_=data_x.__dict__[key].time
val_=data_x.__dict__[key].val
plt.title(key,fontsize=fontsize,x=xpos)
plt.plot(t_,val_,smark)
mn_=min(val_);mx_=max(val_)
plt.ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(labelsize=fontsize)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
plt.close()
plt.ion()
return png_name
def print_cond_table(peak_state_temp, peak_prob_temp,cause_label):
print '---------------------------------------------'
print 'Conditoinal probability of PEAK given ', cause_label
print '---------------------------------------------'
peak_state_set=[[stateDict[s_] for s_ in ss] for ss in peak_state_temp]
print '----------------------------------------'
print 'Conditional Probability'
print '--------------- ---------------'
print 'Sensor State Cond.Prob of PEAK'
print '--------------- ---------------'
print pandas.DataFrame(np.array(peak_prob_temp),np.array(peak_state_set))
print '----------------------------------------'
def plotting_feature_mat(all_psensors,X_names,X_Feature,start_t,end_t):
    # Note: relies on a module-level X_Time array (time stamps aligned with the
    # rows of X_Feature), set by the calling script.
    for name_ in all_psensors:
        idx=grep(name_,X_names)
        dt_=X_Time
        val_=X_Feature[:,idx]
        s_t_idx=np.nonzero((np.array(dt_)>start_t) & (np.array(dt_)<end_t) )[0]
        dt_=np.array(dt_)[s_t_idx]
        val_=np.array(val_)[s_t_idx]
        fig=plt.figure(figsize=(20.0,10.0))
        if len(idx)>0:
            plt.plot(dt_,val_)
            plt.ylabel('Power',fontsize=18)
            plt.tick_params(labelsize='large')
            mn_=min(val_);mx_=max(val_)
            plt.ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
            plt.title(name_+'- Feature value',fontsize=18)
            fig.savefig(fig_dir+name_+'_'+start_t.strftime("%B %d, %Y")+' - '+end_t.strftime("%B %d, %Y")+'.png')
def is_empty_idx(a):
    # import pdb; pdb.set_trace()  # debugging leftover, disabled
try:
[ len(a_1) for a_1 in a].index(0)
return True
except:
return False
def get_pngid():
png_id='_'+str(uuid.uuid4().get_hex().upper()[0:2])+'_'
return png_id | gpl-2.0 |
jreback/pandas | asv_bench/benchmarks/frame_methods.py | 2 | 18907 | import string
import warnings
import numpy as np
from pandas import DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range
from .pandas_vb_common import tm
class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh"))
self.df["foo"] = "bar"
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype="object"
)
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype="object"
)
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx)
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
# mem_itertuples_* benchmarks are slow
timeout = 120
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(
np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)]
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
for name, col in self.df.items():
pass
def time_items_cached(self):
for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples_start(self):
self.df4.itertuples()
def time_itertuples_read_first(self):
next(self.df4.itertuples())
def time_itertuples(self):
for row in self.df4.itertuples():
pass
def time_itertuples_to_list(self):
list(self.df4.itertuples())
def mem_itertuples_start(self):
return self.df4.itertuples()
def peakmem_itertuples_start(self):
self.df4.itertuples()
def mem_itertuples_read_first(self):
return next(self.df4.itertuples())
def peakmem_itertuples(self):
for row in self.df4.itertuples():
pass
def mem_itertuples_to_list(self):
return list(self.df4.itertuples())
def peakmem_itertuples_to_list(self):
list(self.df4.itertuples())
def time_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def time_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def time_itertuples_raw_tuples(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def time_itertuples_raw_tuples_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def mem_itertuples_raw_start(self):
return self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def mem_itertuples_raw_to_list(self):
return list(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def time_iterrows(self):
for row in self.df.iterrows():
pass
class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML:
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range("2000", periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class ToNumpy:
def setup(self):
N = 10000
M = 10
self.df_tall = DataFrame(np.random.randn(N, M))
self.df_wide = DataFrame(np.random.randn(M, N))
self.df_mixed_tall = self.df_tall.copy()
self.df_mixed_tall["foo"] = "bar"
self.df_mixed_tall[0] = period_range("2000", periods=N)
self.df_mixed_tall[1] = range(N)
self.df_mixed_wide = self.df_wide.copy()
self.df_mixed_wide["foo"] = "bar"
self.df_mixed_wide[0] = period_range("2000", periods=M)
self.df_mixed_wide[1] = range(M)
def time_to_numpy_tall(self):
self.df_tall.to_numpy()
def time_to_numpy_wide(self):
self.df_wide.to_numpy()
def time_to_numpy_mixed_tall(self):
self.df_mixed_tall.to_numpy()
def time_to_numpy_mixed_wide(self):
self.df_mixed_wide.to_numpy()
def time_values_tall(self):
self.df_tall.values
def time_values_wide(self):
self.df_wide.values
def time_values_mixed_tall(self):
self.df_mixed_tall.values
def time_values_mixed_wide(self):
self.df_mixed_wide.values
class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
class MaskBool:
def setup(self):
data = np.random.randn(1000, 500)
df = DataFrame(data)
df = df.where(df > 0)
self.bools = df > 0
self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
class Isnull:
def setup(self):
N = 10 ** 3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
sample = np.array(list(string.ascii_letters + string.whitespace))
data = np.random.choice(sample, (N, N))
self.df_strings = DataFrame(data)
sample = np.array(
[
NaT,
np.nan,
None,
np.datetime64("NaT"),
np.timedelta64("NaT"),
0,
1,
2.0,
"",
"abcd",
]
)
data = np.random.choice(sample, (N, N))
self.df_obj = DataFrame(data)
def time_isnull_floats_no_null(self):
isnull(self.df_no_null)
def time_isnull(self):
isnull(self.df)
def time_isnull_strngs(self):
isnull(self.df_strings)
def time_isnull_obj(self):
isnull(self.df_obj)
class Fillna:
params = ([True, False], ["pad", "bfill"])
param_names = ["inplace", "method"]
def setup(self, inplace, method):
values = np.random.randn(10000, 100)
values[::2] = np.nan
self.df = DataFrame(values)
def time_frame_fillna(self, inplace, method):
self.df.fillna(inplace=inplace, method=method)
class Dropna:
params = (["all", "any"], [0, 1])
param_names = ["how", "axis"]
def setup(self, how, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
def time_dropna(self, how, axis):
self.df.dropna(how=how, axis=axis)
def time_dropna_axis_mixed_dtypes(self, how, axis):
self.df_mixed.dropna(how=how, axis=axis)
class Count:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
self.df.columns = MultiIndex.from_arrays([self.df.columns, self.df.columns])
self.df_mixed.index = MultiIndex.from_arrays(
[self.df_mixed.index, self.df_mixed.index]
)
self.df_mixed.columns = MultiIndex.from_arrays(
[self.df_mixed.columns, self.df_mixed.columns]
)
def time_count_level_multi(self, axis):
self.df.count(axis=axis, level=1)
def time_count_level_mixed_dtypes_multi(self, axis):
self.df_mixed.count(axis=axis, level=1)
class Apply:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 100))
self.s = Series(np.arange(1028.0))
self.df2 = DataFrame({i: self.s for i in range(1028)})
self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))
def time_apply_user_func(self):
self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
def time_apply_axis_1(self):
self.df.apply(lambda x: x + 1, axis=1)
def time_apply_lambda_mean(self):
self.df.apply(lambda x: x.mean())
def time_apply_np_mean(self):
self.df.apply(np.mean)
def time_apply_pass_thru(self):
self.df.apply(lambda x: x)
def time_apply_ref_by_name(self):
self.df3.apply(lambda x: x["A"] + x["B"], axis=1)
class Dtypes:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 1000))
def time_frame_dtypes(self):
self.df.dtypes
class Equals:
def setup(self):
N = 10 ** 3
self.float_df = DataFrame(np.random.randn(N, N))
self.float_df_nan = self.float_df.copy()
self.float_df_nan.iloc[-1, -1] = np.nan
self.object_df = DataFrame("foo", index=range(N), columns=range(N))
self.object_df_nan = self.object_df.copy()
self.object_df_nan.iloc[-1, -1] = np.nan
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = ["A"] * len(self.nonunique_cols.columns)
self.nonunique_cols_nan = self.nonunique_cols.copy()
self.nonunique_cols_nan.iloc[-1, -1] = np.nan
def time_frame_float_equal(self):
self.float_df.equals(self.float_df)
def time_frame_float_unequal(self):
self.float_df.equals(self.float_df_nan)
def time_frame_nonunique_equal(self):
self.nonunique_cols.equals(self.nonunique_cols)
def time_frame_nonunique_unequal(self):
self.nonunique_cols.equals(self.nonunique_cols_nan)
def time_frame_object_equal(self):
self.object_df.equals(self.object_df)
def time_frame_object_unequal(self):
self.object_df.equals(self.object_df_nan)
class Interpolate:
params = [None, "infer"]
param_names = ["downcast"]
def setup(self, downcast):
N = 10000
# this is the worst case, where every column has NaNs.
self.df = DataFrame(np.random.randn(N, 100))
self.df.values[::2] = np.nan
self.df2 = DataFrame(
{
"A": np.arange(0, N),
"B": np.random.randint(0, 100, N),
"C": np.random.randn(N),
"D": np.random.randn(N),
}
)
self.df2.loc[1::5, "A"] = np.nan
self.df2.loc[1::5, "C"] = np.nan
def time_interpolate(self, downcast):
self.df.interpolate(downcast=downcast)
def time_interpolate_some_good(self, downcast):
self.df2.interpolate(downcast=downcast)
class Shift:
# frame shift speedup issue-5609
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.rand(10000, 500))
def time_shift(self, axis):
self.df.shift(1, axis=axis)
class Nunique:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 1000))
def time_frame_nunique(self):
self.df.nunique()
class Duplicated:
def setup(self):
n = 1 << 20
t = date_range("2015-01-01", freq="S", periods=(n // 64))
xs = np.random.randn(n // 64).round(2)
self.df = DataFrame(
{
"a": np.random.randint(-1 << 8, 1 << 8, n),
"b": np.random.choice(t, n),
"c": np.random.choice(xs, n),
}
)
self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
def time_frame_duplicated(self):
self.df.duplicated()
def time_frame_duplicated_wide(self):
self.df2.duplicated()
class XS:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.N = 10 ** 4
self.df = DataFrame(np.random.randn(self.N, self.N))
def time_frame_xs(self, axis):
self.df.xs(self.N / 2, axis=axis)
class SortValues:
params = [True, False]
param_names = ["ascending"]
def setup(self, ascending):
self.df = DataFrame(np.random.randn(1000000, 2), columns=list("AB"))
def time_frame_sort_values(self, ascending):
self.df.sort_values(by="A", ascending=ascending)
class SortIndexByColumns:
def setup(self):
N = 10000
K = 10
self.df = DataFrame(
{
"key1": tm.makeStringIndex(N).values.repeat(K),
"key2": tm.makeStringIndex(N).values.repeat(K),
"value": np.random.randn(N * K),
}
)
def time_frame_sort_values_by_columns(self):
self.df.sort_values(by=["key1", "key2"])
class Quantile:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))
def time_frame_quantile(self, axis):
self.df.quantile([0.1, 0.5], axis=axis)
class Rank:
param_names = ["dtype"]
params = [
["int", "uint", "float", "object"],
]
def setup(self, dtype):
self.df = DataFrame(np.random.randn(10000, 10), columns=range(10), dtype=dtype)
def time_rank(self, dtype):
self.df.rank()
class GetDtypeCounts:
# 2807
def setup(self):
self.df = DataFrame(np.random.randn(10, 10000))
def time_frame_get_dtype_counts(self):
with warnings.catch_warnings(record=True):
self.df.dtypes.value_counts()
def time_info(self):
self.df.info()
class NSort:
params = ["first", "last", "all"]
param_names = ["keep"]
def setup(self, keep):
self.df = DataFrame(np.random.randn(100000, 3), columns=list("ABC"))
def time_nlargest_one_column(self, keep):
self.df.nlargest(100, "A", keep=keep)
def time_nlargest_two_columns(self, keep):
self.df.nlargest(100, ["A", "B"], keep=keep)
def time_nsmallest_one_column(self, keep):
self.df.nsmallest(100, "A", keep=keep)
def time_nsmallest_two_columns(self, keep):
self.df.nsmallest(100, ["A", "B"], keep=keep)
class Describe:
def setup(self):
self.df = DataFrame(
{
"a": np.random.randint(0, 100, 10 ** 6),
"b": np.random.randint(0, 100, 10 ** 6),
"c": np.random.randint(0, 100, 10 ** 6),
}
)
def time_series_describe(self):
self.df["a"].describe()
def time_dataframe_describe(self):
self.df.describe()
class SelectDtypes:
params = [100, 1000]
param_names = ["n"]
def setup(self, n):
self.df = DataFrame(np.random.randn(10, n))
def time_select_dtypes(self, n):
self.df.select_dtypes(include="int")
class MemoryUsage:
def setup(self):
self.df = DataFrame(np.random.randn(100000, 2), columns=list("AB"))
self.df2 = self.df.copy()
self.df2["A"] = self.df2["A"].astype("object")
def time_memory_usage(self):
self.df.memory_usage(deep=True)
def time_memory_usage_object_dtype(self):
self.df2.memory_usage(deep=True)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause |
jejjohnson/manifold_learning | src/python/utils/knn_solvers.py | 1 | 6211 | # -*- coding: utf-8 -*-
"""
Created on Tue May 10 18:39:17 2016
@author: eman
"""
from sklearn.neighbors import NearestNeighbors, LSHForest
from annoy import AnnoyIndex
# import hdidx
import numpy as np
class KnnSolver(object):
def __init__(self,
# knn parameters
n_neighbors = 2,
nn_algorithm = 'brute',
metric = 'euclidean',
n_jobs = 1,
affinity = 'nearest_neighbor',
weight = 'heat',
gamma = 1.0,
p_norm = 2,
# ball tree and kdtree parameters
trees = 10,
leaf_size = 30,
# scikit LSHForest parameters
n_estimators = 10,
min_hash_match = 4,
n_candidates = 10,
random_state = 0):
self.n_neighbors = n_neighbors
self.nn_algorithm = nn_algorithm
self.metric = metric
self.n_jobs = n_jobs
self.affinity = affinity
self.weight = weight
self.gamma = gamma
self.trees = trees
self.leaf_size = leaf_size
self.p_norm = p_norm
# scikit LSHForest parameters
self.n_estimators = trees
self.min_hash_match = min_hash_match
self.n_candidates = n_candidates
self.random_state = random_state
def find_knn(self, data):
if self.nn_algorithm in ['brute', 'kd_tree', 'ball_tree']:
            return knn_scikit(data,
                              n_neighbors=self.n_neighbors,
                              algorithm=self.nn_algorithm,
                              leaf_size = self.leaf_size,
                              metric = self.metric,
                              p = self.p_norm)
elif self.nn_algorithm in ['lshf']:
return lshf_scikit(data,
n_neighbors=self.n_neighbors,
n_estimators=self.n_estimators,
min_hash_match=self.min_hash_match,
n_candidates=self.n_candidates,
random_state=self.random_state)
elif self.nn_algorithm in ['annoy']:
return ann_annoy(data,
metric=self.metric,
n_neighbors=self.n_neighbors,
trees=self.trees)
elif self.nn_algorithm in ['hdidx']:
            raise NotImplementedError("'hdidx' backend is not implemented; see the commented-out ann_hdidx stub below.")
#
# return ann_hdidx(data,
# n_neighbors = self.n_neighbors,
# indexer=self.trees)
else:
raise ValueError('Unrecognized NN Method.')
# sklearns nearest neighbors formula
def knn_scikit(data, n_neighbors=4,
algorithm='brute',
leaf_size=30,
metric='euclidean',
p=None):
n_neighbors += 1
# initialize nearest neighbor model
nbrs = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p)
# fit nearest neighbor model to the data
nbrs.fit(data)
# return the distances and indices
return nbrs.kneighbors(data)
# scikit learns locality sensitive hashing function
def lshf_scikit(data, n_neighbors=4,
n_estimators=10,
min_hash_match=4,
n_candidates=10,
random_state=None):
n_neighbors += 1
# initialize nearest neighbor model
nbrs = LSHForest(n_neighbors=n_neighbors,
n_estimators = 10,
min_hash_match = 4,
n_candidates = 10,
random_state = 0)
# fit nearest neighbor model to the data
nbrs.fit(data)
# return the distances and indices
return nbrs.kneighbors(data)
# annoy approximate nearest neighbor function
def ann_annoy(data, metric='euclidean',
n_neighbors=10,
trees=10):
"""My Approximate Nearest Neighbors function (ANN)
using the annoy package.
Parameters
----------
Returns
-------
"""
datapoints = data.shape[0]
dimension = data.shape[1]
# initialize the annoy database
    ann = AnnoyIndex(dimension, metric=metric)  # honor the requested metric
# store the datapoints
for (i, row) in enumerate(data):
ann.add_item(i, row.tolist())
# build the index
ann.build(trees)
# find the k-nearest neighbors for all points
idx = np.zeros((datapoints, n_neighbors), dtype='int')
    distVals = idx.copy().astype(float)
# extract the distance values
for i in range(0, datapoints):
idx[i,:] = ann.get_nns_by_item(i, n_neighbors)
for j in range(0, n_neighbors):
distVals[i,j] = ann.get_distance(i, idx[i,j])
return distVals, idx
# Hdidx package for approximate nearest neighbor function
# def ann_hdidx(data,
# n_neighbors = 10,
# indexer=8):
#
# datapoints = data.shape[0]
# dimensions = data.shape[1]
#
# data_query = np.random.random((n_neighbors, dimensions))
# print np.shape(data_query)
#
# # create Product Quantization Indexer
# idx = hdidx.indexer.IVFPQIndexer()
#
# # build indexer
# idx.build({'vals': data, 'nsubq': indexer})
#
# # add database items to the indexer
# idx.add(data)
#
# # searching in the database and return top-10 items for
# # each query
# idn, distVals = idx.search(data, n_neighbors)
# return distVals, idn
if __name__ == "__main__":
import numpy as np
import time as time
n_dims = 200
n_samples = 5000
X_data = np.random.random((n_samples, n_dims))
print('Size of X is {s}'.format(s=np.shape(X_data)))
for nn_model in ['brute','kd_tree', 'ball_tree', 'annoy']:
t0 = time.time()
# initialize knn model
knn_model = KnnSolver(nn_algorithm=nn_model, n_jobs=5,
n_neighbors=20)
# fit knn model to the data
distVals, idx = knn_model.find_knn(X_data)
t1 = time.time()
print('{m}, time taken: {s:.2f}'.format(m=nn_model, s=t1-t0))
| mit |
bowenliu16/deepchem | deepchem/data/data_loader.py | 1 | 7515 | """
Process an input dataset into a format suitable for machine learning.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
from rdkit import Chem
import time
import sys
from deepchem.utils.save import log
from deepchem.utils.save import load_csv_files
from deepchem.utils.save import load_sdf_files
from deepchem.feat import UserDefinedFeaturizer
from deepchem.data import DiskDataset
def convert_df_to_numpy(df, tasks, id_field, verbose=False):
"""Transforms a dataframe containing deepchem input into numpy arrays"""
n_samples = df.shape[0]
n_tasks = len(tasks)
time1 = time.time()
y = np.hstack([
np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
time2 = time.time()
w = np.ones((n_samples, n_tasks))
missing = np.zeros_like(y).astype(int)
feature_shape = None
for ind in range(n_samples):
for task in range(n_tasks):
if y[ind, task] == "":
missing[ind, task] = 1
ids = df[id_field].values
# Set missing data to have weight zero
for ind in range(n_samples):
for task in range(n_tasks):
if missing[ind, task]:
y[ind, task] = 0.
w[ind, task] = 0.
return ids, y.astype(float), w.astype(float)
def featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):
"""Featurize individual compounds in dataframe.
Given a featurizer that operates on individual chemical compounds
or macromolecules, compute & add features for that compound to the
features dataframe
"""
sample_elems = df[field].tolist()
features = []
for ind, elem in enumerate(sample_elems):
mol = Chem.MolFromSmiles(elem)
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array([1 if elt.size > 0 else 0 for elt in features],
dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
def get_user_specified_features(df, featurizer, verbose=True):
"""Extract and merge user specified features.
Merge features included in dataset provided by user
into final features dataframe
Three types of featurization here:
1) Molecule featurization
-) Smiles string featurization
-) Rdkit MOL featurization
2) Complex featurization
-) PDB files for interacting molecules.
3) User specified featurizations.
"""
time1 = time.time()
df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(pd.to_numeric)
X_shard = df.as_matrix(columns=featurizer.feature_fields)
time2 = time.time()
log("TIMING: user specified processing took %0.3f s" % (time2-time1), verbose)
return X_shard
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
"""Featurize individual compounds in dataframe.
Featurizes .sdf files, so the 3-D structure should be preserved
so we use the rdkit "mol" object created from .sdf instead of smiles
string. Some featurizers such as CoulombMatrix also require a 3-D
structure. Featurizing from .sdf is currently the only way to
perform CM feautization.
"""
sample_elems = df[field].tolist()
features = []
for ind, mol in enumerate(sample_elems):
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array([1 if elt.size > 0 else 0 for elt in features],
dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
"""
Handles loading/featurizing of chemical samples (datapoints).
Currently knows how to load csv-files/pandas-dataframes/SDF-files. Writes a
dataframe object to disk as output.
"""
def __init__(self, tasks, smiles_field=None,
id_field=None, mol_field=None, featurizer=None,
verbose=True, log_every_n=1000):
"""Extracts data from input as Pandas data frame"""
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.verbose = verbose
self.tasks = tasks
self.smiles_field = smiles_field
if id_field is None:
self.id_field = smiles_field
else:
self.id_field = id_field
self.mol_field = mol_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def featurize(self, input_files, data_dir=None, shard_size=8192):
"""Featurize provided files and write to specified location."""
log("Loading raw samples now.", self.verbose)
log("shard_size: %d" % shard_size, self.verbose)
if not isinstance(input_files, list):
input_files = [input_files]
def shard_generator():
for shard_num, shard in enumerate(self.get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self.featurize_shard(shard)
ids, y, w = convert_df_to_numpy(shard, self.tasks, self.id_field)
# Filter out examples where featurization failed.
ids, y, w = (ids[valid_inds], y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
time2 = time.time()
log("TIMING: featurizing shard %d took %0.3f s" % (shard_num, time2-time1),
self.verbose)
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)
def get_shards(self, input_files, shard_size):
"""Stub for children classes."""
raise NotImplementedError
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
raise NotImplementedError
class CSVLoader(DataLoader):
"""
Handles loading of CSV files.
"""
def get_shards(self, input_files, shard_size, verbose=True):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size, verbose=verbose)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
return featurize_smiles_df(shard, self.featurizer,
field=self.smiles_field)
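
# Minimal usage sketch (illustration only; file name, task and column names are
# hypothetical, and some_featurizer stands for any deepchem featurizer, e.g. a
# circular-fingerprint featurizer from deepchem.feat):
#   loader = CSVLoader(tasks=["activity"], smiles_field="smiles",
#                      featurizer=some_featurizer)
#   dataset = loader.featurize("assay_data.csv", shard_size=4096)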
class UserCSVLoader(DataLoader):
"""
Handles loading of CSV files with user-defined featurizers.
"""
def get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
assert isinstance(self.featurizer, UserDefinedFeaturizer)
X = get_user_specified_features(shard, self.featurizer)
return (X, np.ones(len(X), dtype=bool))
class SDFLoader(DataLoader):
"""
Handles loading of SDF files.
"""
def get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_sdf_files(input_files)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
log("Currently featurizing feature_type: %s"
% self.featurizer.__class__.__name__, self.verbose)
return featurize_mol_df(shard, self.featurizer, field=self.mol_field)
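# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal
# example of how the loaders above might be driven. The CSV path, the task
# column name, and the CircularFingerprint featurizer are assumptions made
# purely for illustration; substitute whatever featurizer and columns your
# project actually uses.
def _example_csv_loading():
  """Hypothetical: featurize a CSV of SMILES strings into a dataset."""
  from deepchem.feat import CircularFingerprint  # assumed to be available
  featurizer = CircularFingerprint(size=1024)
  loader = CSVLoader(tasks=["measured_log_solubility"],  # assumed column
                     smiles_field="smiles",              # assumed column
                     featurizer=featurizer)
  # featurize() shards the CSV, featurizes each shard, and returns a dataset
  return loader.featurize("delaney.csv", shard_size=2048)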
| gpl-3.0 |
NelisVerhoef/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
    Note: Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The exception class (or classes) expected to be raised.
    func : callable
        Callable object expected to raise the error.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
    In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
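# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A small
# example of how the helpers above are typically combined in estimator-wide
# checks: enumerate classifiers via `all_estimators` and give each one a
# fixed random_state. This is only a sketch; real scikit-learn tests run
# many more checks per estimator.
def _example_iterate_classifiers():
    for name, Estimator in all_estimators(type_filter='classifier'):
        est = Estimator()           # most remaining estimators take no args
        set_random_state(est, 0)    # no-op if there is no random_state param
        yield name, est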
| bsd-3-clause |
mrares/incubator-airflow | airflow/contrib/hooks/salesforce_hook.py | 4 | 12120 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
import pandas as pd
import time
from airflow.utils.log.logging_mixin import LoggingMixin
class SalesforceHook(BaseHook, LoggingMixin):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
        Creates a new connection to Salesforce
        and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
            The connection should be of type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
            `{"security_token":"YOUR_SECURITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
        If we have already signed in, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host
)
self.sf = sf
return sf
def make_query(self, query):
"""
        Make a query to Salesforce. Returns the result as a dictionary.
:param query: The query to make to Salesforce
"""
self.sign_in()
self.log.info("Querying for all objects")
query = self.sf.query_all(query)
self.log.info(
"Received results: Total size: %s; Done: %s",
query['totalSize'], query['done']
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
def _build_field_list(self, fields):
        # join all of the fields in a comma-separated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
self.log.info(
"Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
log = LoggingMixin().log
log.warning(
"Could not convert field to timestamps: %s", col.name
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
def write_object_to_file(
self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False
):
"""
Write query results to file.
Acceptable formats are:
- csv:
            comma-separated values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
            JSON array, but each element is newline-delimited
            instead of comma-delimited as in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as
they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes
to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your
datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:param filename: the name of the file where the data
should be dumped to
:param fmt: the format you want the output in.
*Default:* csv.
:param coerce_to_timestamp: True if you want all datetime fields to be
converted into Unix timestamps.
False if you want them to be left in the
same format as they were in Salesforce.
Leaving the value as False will result
in datetimes being strings.
*Defaults to False*
:param record_time_added: *(optional)* True if you want to add a
Unix timestamp field to the resulting data
that marks when the data
was fetched from Salesforce.
*Default: False*.
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {0}".format(fmt))
# this line right here will convert all integers to floats if there are
# any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [c.lower() for c in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
            # possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
i['name'].lower()
for i in schema['fields']
if i['type'] in ["date", "datetime"] and
i['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
lambda x: self._to_timestamp(x)
)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects
# that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "")
)
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
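# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It shows how
# this hook might be called from inside an Airflow task. The connection id
# "salesforce_default", the "Contact" object, and the output path are
# assumptions for illustration only; configure them to match your own
# Salesforce instance and Airflow connections.
def _example_dump_contacts(output_path="/tmp/contacts.csv"):
    hook = SalesforceHook(conn_id="salesforce_default")    # assumed conn id
    fields = hook.get_available_fields("Contact")          # assumed object
    results = hook.get_object_from_salesforce("Contact", fields)
    # write the returned records to CSV, coercing dates to Unix timestamps
    return hook.write_object_to_file(results["records"],
                                     filename=output_path,
                                     fmt="csv",
                                     coerce_to_timestamp=True)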
| apache-2.0 |
Clyde-fare/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples in the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples in the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
    regression, but slightly different from CCA. This is mostly used
for modeling.
    This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
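# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A minimal
# end-to-end example of PLSSVD on toy data, mirroring the doctests of
# PLSRegression and PLSCanonical above. The X/Y arrays are made up purely
# for illustration and rely on the module-level numpy import.
def _example_plssvd():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    pls_svd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = pls_svd.transform(X, Y)
    return x_scores, y_scores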
| bsd-3-clause |
rabitdash/practice | python-pj/mathlab/input_data.py | 1 | 4261 | # coding: utf-8
import csv
import pandas as pd
import string as s
from nltk.corpus import stopwords
import re
spamdata_T1csv = "./spamdata_T1.csv"
spamdata_T2csv = "./spamdata_T2.csv"
testdatacsv = "./testdata.csv"
def rmstopwords(string):
    # build the stopword set once (NLTK's corpus id is lowercase 'english');
    # filtering with a comprehension avoids mutating the list while iterating
    stop = set(stopwords.words('english'))
    tmp = [x for x in string.split(' ') if x not in stop]
    return ' '.join(tmp)
def rmpunc(string):
    # map every punctuation character to a space, then strip the whitespace out
    mt = str.maketrans(s.punctuation, len(s.punctuation) * ' ')
    cleaned = []
    for x in string.split(' '):
        x = x.translate(mt)
        x = re.sub(r"\s*", '', x)
        cleaned.append(x)
    return ' '.join(cleaned)
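# Illustrative (hypothetical) behaviour of the two helpers above, assuming
# the NLTK English stopword corpus is installed:
#   rmstopwords("this is a free prize")  ->  "free prize"
#   rmpunc("win $$$ now!!!")             ->  "win  now"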
# Data cleaning
def read_data_sets(filename):
with open(filename, 'rb') as f:
string = str(f.read())
string = string.replace("b\'", '')
strlist = string.split("\\r\\n")
data = []
for string in strlist:
string = string.replace('\"', '')
string = string.replace('\\', '')
string = string.replace('<#>', '')
        string = string.strip()  # strip extra whitespace
        if string.find("spam") == 0:
tmpdata = string.split("spam")[1]
tmpdata = tmpdata[1::]
tmpdata = tmpdata.strip()
tmpdata = rmstopwords(tmpdata)
tmpdata = re.sub('\d{5,13}', ' tel ', tmpdata)
tmpdata = re.sub("www.*.com", ' url ', tmpdata)
tmpdata = rmpunc(tmpdata)
data.append([1, tmpdata])
        elif string.find("ham") == 0:
tmpdata = string.split("ham")[1]
tmpdata = tmpdata[1::]
tmpdata = tmpdata.strip()
tmpdata = rmstopwords(tmpdata)
tmpdata = re.sub('\d{5,13}', ' tel ', tmpdata)
tmpdata = re.sub("www.*.com", ' url ', tmpdata)
tmpdata = rmpunc(tmpdata)
data.append([0, tmpdata])
        elif string == '\'':
pass
else:
string = rmstopwords(string)
string = re.sub('\d{5,13}', ' tel ', string)
string = re.sub("www.*.com", ' url ', string)
string = rmpunc(string)
data.append(string)
print("Load {0} done.".format(filename))
# data = [x[1].replace(s.punctuation, '') for x in data]
return data
traindata = read_data_sets(spamdata_T2csv)
testdata = read_data_sets(testdatacsv)
predictdata = read_data_sets(spamdata_T1csv)
def out2csv():
flag = [x[0] for x in traindata]
content = [x[1] for x in traindata]
content0 = []
content1 = []
flag0 = []
flag1 = []
for (x, y) in zip(flag, content):
        if x == 0:
            content0.append(y)
            flag0.append(0)
        elif x == 1:
content1.append(y)
flag1.append(1)
# column_flag0 = pd.Series(flag0)
column_content0 = pd.Series(content0)
# save0 = pd.concat([column_flag0,column_content0], axis=1)
save0 = pd.concat([column_content0], axis=1)
save0.to_csv("ham_data.csv",encoding='utf-8',header=False,index=False)
# column_flag1 = pd.Series(flag1)
column_content1 = pd.Series(content1)
# save1 = pd.concat([column_flag1, column_content1], axis=1)
save1 = pd.concat([column_content1], axis=1)
save1.to_csv("spam_data.csv",encoding='utf-8',header=False,index=False) # 垃圾短信
flag = [x[0] for x in traindata]
column_flag = pd.Series(flag)
content = [x[1] for x in traindata]
column_content = pd.Series(content)
save = pd.concat([column_flag, column_content], axis=1)
save.to_csv("train_data.csv", encoding='utf-8', header=False, index=False)
content = [x for x in testdata]
column_content = pd.Series(content)
save = pd.concat([column_content], axis=1)
save.to_csv("test_data.csv", encoding='utf-8', header=False, index=False)
content = [x for x in predictdata]
column_content = pd.Series(content)
save = pd.concat([column_content], axis=1)
save.to_csv("predict_data.csv", encoding='utf-8', header=False, index=False)
| mit |
shikhar413/openmc | examples/pincell_depletion/run_depletion.py | 8 | 5021 | from math import pi
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
###############################################################################
# Define materials
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cm3', 10.29769)
uo2.add_element('U', 1., enrichment=2.4)
uo2.add_element('O', 2.)
helium = openmc.Material(name='Helium for gap')
helium.set_density('g/cm3', 0.001598)
helium.add_element('He', 2.4044e-4)
zircaloy = openmc.Material(name='Zircaloy 4')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_element('Sn', 0.014, 'wo')
zircaloy.add_element('Fe', 0.00165, 'wo')
zircaloy.add_element('Cr', 0.001, 'wo')
zircaloy.add_element('Zr', 0.98335, 'wo')
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 0.740582)
borated_water.add_element('B', 4.0e-5)
borated_water.add_element('H', 5.0e-2)
borated_water.add_element('O', 2.4e-2)
borated_water.add_s_alpha_beta('c_H_in_H2O')
###############################################################################
# Create geometry
###############################################################################
# Define surfaces
pitch = 1.25984
fuel_or = openmc.ZCylinder(r=0.39218, name='Fuel OR')
clad_ir = openmc.ZCylinder(r=0.40005, name='Clad IR')
clad_or = openmc.ZCylinder(r=0.45720, name='Clad OR')
box = openmc.model.rectangular_prism(pitch, pitch, boundary_type='reflective')
# Define cells
fuel = openmc.Cell(fill=uo2, region=-fuel_or)
gap = openmc.Cell(fill=helium, region=+fuel_or & -clad_ir)
clad = openmc.Cell(fill=zircaloy, region=+clad_ir & -clad_or)
water = openmc.Cell(fill=borated_water, region=+clad_or & box)
# Define overall geometry
geometry = openmc.Geometry([fuel, gap, clad, water])
###############################################################################
# Set volumes of depletable materials
###############################################################################
# Set material volume for depletion. For 2D simulations, this should be an area.
uo2.volume = pi * fuel_or.r**2
###############################################################################
# Transport calculation settings
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.source.Source(space=uniform_dist)
entropy_mesh = openmc.RegularMesh()
entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]
entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]
entropy_mesh.dimension = [10, 10, 1]
settings.entropy_mesh = entropy_mesh
###############################################################################
# Initialize and run depletion calculation
###############################################################################
# Create depletion "operator"
chain_file = './chain_simple.xml'
op = openmc.deplete.Operator(geometry, settings, chain_file)
# Perform simulation using the predictor algorithm
time_steps = [1.0, 1.0, 1.0, 1.0, 1.0] # days
power = 174 # W/cm, for 2D simulations only (use W for 3D)
integrator = openmc.deplete.PredictorIntegrator(op, time_steps, power, timestep_units='d')
integrator.integrate()
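# --- Hedged alternative (added; not part of the original example) ---
# openmc.deplete also ships higher-order coupling schemes that take the same
# constructor arguments; assuming a matching OpenMC version, the CE/CM
# predictor-corrector could be swapped in as:
#
#   integrator = openmc.deplete.CECMIntegrator(op, time_steps, power,
#                                              timestep_units='d')
#   integrator.integrate()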
###############################################################################
# Read depletion calculation results
###############################################################################
# Open results file
results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
# Obtain K_eff as a function of time
time, keff = results.get_eigenvalue()
# Obtain U235 concentration as a function of time
time, n_U235 = results.get_atoms('1', 'U235')
# Obtain Xe135 capture reaction rate as a function of time
time, Xe_capture = results.get_reaction_rate('1', 'Xe135', '(n,gamma)')
###############################################################################
# Generate plots
###############################################################################
days = 24*60*60
plt.figure()
plt.plot(time/days, keff, label="K-effective")
plt.xlabel("Time (days)")
plt.ylabel("Keff")
plt.show()
plt.figure()
plt.plot(time/days, n_U235, label="U235")
plt.xlabel("Time (days)")
plt.ylabel("n U5 (-)")
plt.show()
plt.figure()
plt.plot(time/days, Xe_capture, label="Xe135 capture")
plt.xlabel("Time (days)")
plt.ylabel("RR (-)")
plt.show()
plt.close('all')
| mit |
vanam/clustering | clustering_system/evaluator/measures.py | 1 | 10914 | from collections import Counter
import numpy as np
# import pandas as pd
from pandas import crosstab
from scipy.special import comb
def purity(clusters, classes) -> float:
"""
The purity ranges between 0 (bad) and 1 (good). However, we can trivially achieve a purity of 1
by putting each object into its own cluster,
so this measure does not penalize for the number of clusters.
"""
# Total number of items
N = len(clusters)
# Accumulator
acc = 0
# # For each cluster
# for c in [[classes[i] for i in range(N) if clusters[i] == cluster_label] for cluster_label in np.unique(clusters)]:
# # Count class labels
# counter = Counter(c)
# For each cluster
for cluster_label in np.unique(clusters):
# Count class labels
counter = Counter(classes[clusters == cluster_label])
# Accumulate the most frequent label count
acc += counter.most_common(1)[0][1]
return acc / N
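# Worked example (added comment): for clusters np.array([1, 1, 2, 2]) and
# classes np.array([0, 0, 0, 1]), cluster 1 contributes its majority count 2
# and cluster 2 contributes 1, so purity = (2 + 1) / 4 = 0.75.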
def purity2(clusters, classes):
"""
For each cluster return its purity and size
"""
result = []
# For each cluster
for cluster_label in np.unique(clusters):
# Cluster items
c = classes[clusters == cluster_label]
# Count class labels
counter = Counter(c)
# Size of the cluster
N_i = len(c)
# the most frequent label count / size of the cluster
result.append((counter.most_common(1)[0][1] / N_i, N_i))
return result
def rand_index(clusters, classes) -> float:
"""
    |                   | same cluster         | different clusters   |
    | same class        | true positives (TP)  | false negatives (FN) |
    | different classes | false positives (FP) | true negatives (TN)  |
"""
# Total number of objects
N = len(clusters)
# The number of pairs of objects put in the same cluster, regardless of label
tp_plus_fp = comb(np.bincount(clusters), 2).sum()
# The number of pairs of objects put in the same class, regardless of cluster label
tp_plus_fn = comb(np.bincount(classes), 2).sum()
# The number of pairs
c2N = comb(N, 2)
# The number of pairs of objects put in the same class with the same cluster label
tp = sum(comb(np.bincount(classes[clusters == cluster_label]), 2).sum() for cluster_label in np.unique(clusters))
# fp = tp_plus_fp - tp
# fn = tp_plus_fn - tp
# tn = c2N - tp - fp - fn
tn = c2N - tp_plus_fp - tp_plus_fn + tp
# return (tp + tn) / (tp + fp + fn + tn) = (tp + tn) / nC2
return (tp + tn) / c2N
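# Worked example (added comment): for clusters np.array([1, 1, 2, 2]) and
# classes np.array([0, 0, 0, 1]) there are C(4, 2) = 6 pairs in total,
# TP = 1, TP+FP = 2, TP+FN = 3, hence TN = 6 - 2 - 3 + 1 = 2 and
# rand_index = (TP + TN) / 6 = 0.5.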
def precision(clusters, classes) -> float:
# The number of pairs of objects put in the same cluster, regardless of label
tp_plus_fp = comb(np.bincount(clusters), 2).sum()
# The number of pairs of objects put in the same class with the same cluster label
tp = sum(comb(np.bincount(classes[clusters == cluster_label]), 2).sum() for cluster_label in np.unique(clusters))
fp = tp_plus_fp - tp
return tp / (tp + fp)
def recall(clusters, classes) -> float:
# The number of pairs of objects put in the same class, regardless of cluster label
tp_plus_fn = comb(np.bincount(classes), 2).sum()
# The number of pairs of objects put in the same class with the same cluster label
tp = sum(comb(np.bincount(classes[clusters == cluster_label]), 2).sum() for cluster_label in np.unique(clusters))
fn = tp_plus_fn - tp
return tp / (tp + fn)
def f1_measure(clusters, classes) -> float:
# The number of pairs of objects put in the same cluster, regardless of label
tp_plus_fp = comb(np.bincount(clusters), 2).sum()
# The number of pairs of objects put in the same class, regardless of cluster label
tp_plus_fn = comb(np.bincount(classes), 2).sum()
# The number of pairs of objects put in the same class with the same cluster label
tp = sum(comb(np.bincount(classes[clusters == cluster_label]), 2).sum() for cluster_label in np.unique(clusters))
fn = tp_plus_fn - tp
fp = tp_plus_fp - tp
# p = precision(clusters, classes)
# r = recall(clusters, classes)
# return 2 * p * r / (p + r)
return 2 * tp / (2 * tp + fn + fp)
def mutual_information(clusters, classes, contingency=None) -> float:
if contingency is None:
contingency = crosstab(clusters, classes, rownames=['clusters'], colnames=['classes'], margins=True)
# Number of data points
N = len(clusters)
if N == 0:
return 0.0
# Find cluster labels
cluster_labels = np.unique(clusters)
# Find class labels
class_labels = np.unique(classes)
mutual_information = 0.0
for kl in cluster_labels:
for cl in class_labels:
a_ij = contingency[cl][kl]
if a_ij == 0.0:
continue
mutual_information += a_ij / N * (np.log(a_ij * N) - np.log(contingency['All'][kl] * contingency[cl]['All']))
return mutual_information
def normalized_mutual_information(clusters, classes) -> float:
I = mutual_information(clusters, classes)
entropy_K, entropy_C = entropy(clusters), entropy(classes)
nmi = I / np.sqrt(entropy_K * entropy_C)
return nmi
def normalized_mutual_information2(clusters, classes) -> float:
"""
See (25.12) in Murphy, p. 134.
"""
I = mutual_information(clusters, classes)
entropy_K, entropy_C = entropy(clusters), entropy(classes)
nmi = I / ((entropy_K + entropy_C) / 2)
return nmi
def homogeneity(clusters, classes) -> float:
return _homogeneity_completeness_v_measure(clusters, classes)[0]
def completeness(clusters, classes) -> float:
return _homogeneity_completeness_v_measure(clusters, classes)[1]
def v_measure(clusters, classes) -> float:
return _homogeneity_completeness_v_measure(clusters, classes)[2]
def _homogeneity_completeness_v_measure(clusters, classes):
"""
https://www.researchgate.net/publication/221012656_V-Measure_A_Conditional_Entropy-Based_External_Cluster_Evaluation_Measure
https://github.com/scikit-learn/scikit-learn/blob/f3320a6f/sklearn/metrics/cluster/supervised.py#L217
"""
# Calculate contingency table
A = crosstab(clusters, classes, rownames=['clusters'], colnames=['classes'], margins=True)
# Calculate entropy - H(C) - of class labeling
# entropy_C = -np.sum([A[l]['All'] / N * np.log(A[l]['All'] / N) for l in class_labels])
entropy_C = entropy(classes)
# Calculate entropy - H(K) - of cluster labeling
# entropy_K = -np.sum([A['All'][l] / N * np.log(A['All'][l] / N) for l in cluster_labels])
entropy_K = entropy(clusters)
# # Number of data points
# N = len(clusters)
#
# # Find cluster labels
# cluster_labels = np.unique(clusters)
#
# # Find class labels
# class_labels = np.unique(classes)
#
# # Calculate conditional entropy H(K|C)
# entropy_C_K = -np.sum([np.sum([A[cl][kl] / N * np.log(A[cl][kl] / A['All'][kl]) for cl in class_labels if A[cl][kl] != 0.0]) for kl in cluster_labels])
#
# # Calculate conditional entropy H(C|K)
# entropy_K_C = -np.sum([np.sum([A[cl][kl] / N * np.log(A[cl][kl] / A[cl]['All']) for kl in cluster_labels if A[cl][kl] != 0.0]) for cl in class_labels])
# Mutual information
# I(C; K) = H(C) − H(C|K) = H(K) − H(K|C)
# I = entropy_C - entropy_C_K
# I = entropy_K - entropy_K_C
I = mutual_information(clusters, classes, contingency=A)
# 1 - H(C|K) / H(C) = I / H(C)
# homogeneity = 1 - entropy_C_K / entropy_C if entropy_C else 1.0
homogeneity = I / entropy_C if entropy_C else 1.0
# 1 - H(K|C) / H(K) = I / H(K)
# completeness = 1 - entropy_K_C / entropy_K if entropy_K else 1.0
completeness = I / entropy_K if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure = 0.0
else:
v_measure = 2 * (homogeneity * completeness) / (homogeneity + completeness)
return homogeneity, completeness, v_measure
def nv_measure(clusters, classes, p=1):
K = len(np.unique(clusters))
C = len(np.unique(classes))
x = min(K, C) / max(K, C)
return (1 - (1 - x**p)**(1 / p)) * v_measure(clusters, classes)
def entropy(labels) -> float:
"""
Calculate entropy using maximum likelihood estimates for label probabilities.
"""
# Length of an array
N = len(labels)
if N == 0:
return 0.0
# Convert labels to natural numbers
label_idx = np.unique(labels, return_inverse=True)[1]
# Count frequency of labels
pi = np.bincount(label_idx).astype(np.float64)
# Keep only non-zero ones
pi = pi[pi > 0]
# log(a / b) calculated as log(a) - log(b)
return -np.sum((pi / N) * (np.log(pi) - np.log(N)))
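# Worked example (added comment): entropy(['a', 'a', 'b'])
# = -(2/3 * log(2/3) + 1/3 * log(1/3)) ~= 0.6365 (natural logarithm).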
def _evaluate(clusters, classes):
print("Evaluation")
print("==========")
print("Number of observations: %d" % len(classes))
print("Number of classes: %d" % len(np.unique(classes)))
print("Number of clusters: %d" % len(np.unique(clusters)))
print("Purity: %f" % purity(clusters, classes))
print("Purity2: %s" % purity2(clusters, classes))
print("Rand index: %f" % rand_index(clusters, classes))
print("Entropy (clusters): %f" % entropy(clusters))
print("Entropy (classes): %f" % entropy(classes))
print("Homogeneity: %f" % homogeneity(clusters, classes))
print("Completeness: %f" % completeness(clusters, classes))
print("V-Measure: %f" % v_measure(clusters, classes))
print("NV-Measure: %f" % nv_measure(clusters, classes))
print("Precision: %f" % precision(clusters, classes))
print("Recall: %f" % recall(clusters, classes))
print("F1-Measure: %f" % f1_measure(clusters, classes))
print("Mutual Information: %f" % mutual_information(clusters, classes))
print("Normalized Mutual Information: %f" % normalized_mutual_information(clusters, classes))
print("Normalized Mutual Information 2: %f" % normalized_mutual_information2(clusters, classes))
if __name__ == "__main__":
# Based on Figure 16.4 of (Manning et al. 2008)
# clusters = [
# [
# 'A', 'A', 'A',
# 'A', 'A', 'B'
# ],
# [
# 'A', 'B', 'B',
# 'B', 'B', 'C'
# ],
# [
# 'A', 'A',
# 'C', 'C', 'C'
# ]
# ]
classes = np.array([
11, 11, 11,
11, 11, 22,
11, 22, 22,
22, 22, 33,
11, 11,
33, 33, 33,
])
clusters = np.array([
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
3, 3,
3, 3, 3,
])
_evaluate(clusters, classes)
| mit |
dhruvparamhans/zipline | zipline/finance/risk/cumulative.py | 4 | 16772 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
from zipline.finance import trading
import zipline.utils.math_utils as zp_math
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
alpha,
check_entry,
choose_treasury,
downside_risk,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
"""
http://en.wikipedia.org/wiki/Information_ratio
Args:
        algo_volatility (float):
            Annualized volatility of the algorithm's returns.
        algorithm_return (float):
            Annualized mean return of the algorithm.
        benchmark_return (float):
            Annualized mean return of the benchmark over the same period.
Returns:
float. Information ratio.
"""
if zp_math.tolerant_equals(algo_volatility, 0):
return np.nan
return (
(algorithm_return - benchmark_return)
# The square of the annualization factor is in the volatility,
# because the volatility is also annualized,
# i.e. the sqrt(annual factor) is in the volatility's numerator.
        # So, to end up with the correct annualization factor for this
        # ratio's numerator (which should be the sqrt(annual factor)),
        # the square of the sqrt of the annual factor, i.e. the annual factor
        # itself, is needed in the numerator to factor out the division by
        # its square root.
/ algo_volatility)
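# Worked example (added comment): with an annualized algorithm return of 0.10,
# a benchmark return of 0.06 and an annualized algorithm volatility of 0.20,
# information_ratio(0.20, 0.10, 0.06) == (0.10 - 0.06) / 0.20 == 0.2.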
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
'information',
)
def __init__(self, sim_params,
returns_frequency=None,
create_first_day_stats=False):
"""
- @returns_frequency allows for configuration of the whether
the benchmark and algorithm returns are in units of minutes or days,
if `None` defaults to the `emission_rate` in `sim_params`.
"""
self.treasury_curves = trading.environment.treasury_curves
self.start_date = sim_params.period_start.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.end_date = sim_params.period_end.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.trading_days = trading.environment.days_in_range(
self.start_date,
self.end_date)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_date - \
trading.environment.trading_days.freq
last_day = normalize_date(sim_params.period_end)
if last_day not in self.trading_days:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.trading_days = self.trading_days.append(last_day)
self.sim_params = sim_params
self.create_first_day_stats = create_first_day_stats
if returns_frequency is None:
returns_frequency = self.sim_params.emission_rate
self.returns_frequency = returns_frequency
if returns_frequency == 'daily':
cont_index = self.get_daily_index()
elif returns_frequency == 'minute':
cont_index = self.get_minute_index(sim_params)
self.cont_index = cont_index
self.algorithm_returns_cont = pd.Series(index=cont_index)
self.benchmark_returns_cont = pd.Series(index=cont_index)
self.mean_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_returns_cont = pd.Series(index=cont_index)
self.mean_benchmark_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_benchmark_returns_cont = pd.Series(
index=cont_index)
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = pd.Series(index=cont_index)
self.benchmark_cumulative_returns = pd.Series(index=cont_index)
self.excess_returns = pd.Series(index=cont_index)
self.latest_dt = cont_index[0]
self.metrics = pd.DataFrame(index=cont_index,
columns=self.METRIC_NAMES)
self.drawdowns = pd.Series(index=cont_index)
self.max_drawdowns = pd.Series(index=cont_index)
self.max_drawdown = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.trading_days)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def get_minute_index(self, sim_params):
"""
        Stitches together multiple days' worth of business minutes into
        one continuous index.
"""
trading_minutes = None
for day in self.trading_days:
minutes_for_day = trading.environment.market_minutes_for_day(day)
if trading_minutes is None:
# Create container for all minutes on first iteration
trading_minutes = minutes_for_day
else:
trading_minutes = trading_minutes + minutes_for_day
return trading_minutes
def get_daily_index(self):
return self.trading_days
def update(self, dt, algorithm_returns, benchmark_returns):
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
self.algorithm_returns_cont[dt] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.algorithm_returns)
self.algorithm_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.algorithm_returns)
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt]
self.mean_returns_cont[dt] = \
algo_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt]
self.annualized_mean_returns_cont[dt] = \
self.mean_returns_cont[dt] * 252
self.annualized_mean_returns = self.annualized_mean_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(self.mean_returns)
self.annualized_mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.annualized_mean_returns)
self.benchmark_returns_cont[dt] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.benchmark_returns)
self.benchmark_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.benchmark_returns)
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt]
self.mean_benchmark_returns_cont[dt] = \
benchmark_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt]
self.annualized_mean_benchmark_returns_cont[dt] = \
self.mean_benchmark_returns_cont[dt] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt]
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.metrics.benchmark_volatility[dt] = \
self.calculate_volatility(self.benchmark_returns)
self.metrics.algorithm_volatility[dt] = \
self.calculate_volatility(self.algorithm_returns)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
treasury_end
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[self.latest_dt] = (
self.algorithm_cumulative_returns[self.latest_dt]
-
self.treasury_period_return)
self.metrics.beta[dt] = self.calculate_beta()
self.metrics.alpha[dt] = self.calculate_alpha()
self.metrics.sharpe[dt] = self.calculate_sharpe()
self.metrics.downside_risk[dt] = self.calculate_downside_risk()
self.metrics.sortino[dt] = self.calculate_sortino()
self.metrics.information[dt] = self.calculate_information()
self.max_drawdown = self.calculate_max_drawdown()
self.max_drawdowns[dt] = self.max_drawdown
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
dt = self.latest_dt
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.metrics.benchmark_volatility[dt],
'algo_volatility': self.metrics.algorithm_volatility[dt],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return': self.algorithm_cumulative_returns[dt],
'benchmark_period_return': self.benchmark_cumulative_returns[dt],
'beta': self.metrics.beta[dt],
'alpha': self.metrics.alpha[dt],
'sharpe': self.metrics.sharpe[dt],
'sortino': self.metrics.sortino[dt],
'information': self.metrics.information[dt],
'excess_return': self.excess_returns[dt],
'max_drawdown': self.max_drawdown,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self.metrics, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def calculate_cumulative_returns(self, returns):
return (1. + returns).prod() - 1
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_drawdown(self):
if len(self.algorithm_cumulative_returns) == 0:
return self.max_drawdown
# The drawdown is defined as: (high - low) / high
# The above factors out to: 1.0 - (low / high)
#
# Instead of explicitly always using the low, use the current total
# return value, and test that against the max drawdown, which will
# exceed the previous max_drawdown iff the current return is lower than
# the previous low in the current drawdown window.
cur_drawdown = 1.0 - (
(1.0 + self.algorithm_cumulative_returns[self.latest_dt])
/
(1.0 + self.current_max))
self.drawdowns[self.latest_dt] = cur_drawdown
if self.max_drawdown < cur_drawdown:
return cur_drawdown
else:
return self.max_drawdown
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()])
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
return sortino_ratio(self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()],
self.metrics.downside_risk[self.latest_dt])
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(
self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.annualized_mean_benchmark_returns[self.latest_dt])
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.annualized_mean_returns[self.latest_dt],
self.treasury_period_return,
self.annualized_mean_benchmark_returns[self.latest_dt],
self.metrics.beta[self.latest_dt])
def calculate_volatility(self, daily_returns):
if len(daily_returns) <= 1:
return 0.0
return np.std(daily_returns, ddof=1) * math.sqrt(252)
def calculate_downside_risk(self):
return downside_risk(self.algorithm_returns,
self.mean_returns,
252)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
        # so return 0.0.
if len(self.annualized_mean_returns) < 2:
return 0.0
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return beta
| apache-2.0 |
jkarnows/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e417.py | 2 | 6371 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
    max_diff=100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
# standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-6,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': BidirectionalRecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.9),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
net.load_params(5000)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
peterfpeterson/mantid | Testing/PerformanceTests/analysis.py | 3 | 19669 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
""" Module containing functions for test
performance analysis, plotting, and saving
to other formats (CSV, PDF) """
import testresult
import os
import sys
import sqlresults
from sqlresults import get_results
from matplotlib import pyplot as plt
import numpy as np
import datetime
import random
import plotly.offline as offline
import plotly.graph_objs as go
# This is the date string format as returned by the database
DATE_STR_FORMAT_MICRO = "%Y-%m-%d %H:%M:%S.%f"
DATE_STR_FORMAT_NO_MICRO = "%Y-%m-%d %H:%M:%S"
MANTID_ADDRESS = "https://github.com/mantidproject/mantid"
# The default HTML header
DEFAULT_HTML_HEADER = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html>
<head><LINK href="report.css" rel="stylesheet" type="text/css"></head>
"""
DEFAULT_PLOTLY_HEADER = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html>
<head><LINK href="report.css" rel="stylesheet" type="text/css"><script src="https://cdn.plot.ly/plotly-latest.min.js"></script></head>
"""
DEFAULT_HTML_FOOTER = """</body></html>"""
# ============================================================================================
def to_datetime(formatted_str):
"""Return a datetime object from a formatted string
It deals with the possible absence of a microseconds field
"""
try:
date = datetime.datetime.strptime(formatted_str, DATE_STR_FORMAT_MICRO)
except ValueError:
date = datetime.datetime.strptime(formatted_str, DATE_STR_FORMAT_NO_MICRO)
return date
# ============================================================================================
def get_orderby_clause(last_num):
"""Returns a order by clause that limits to the last # revisions """
if last_num is not None:
return " ORDER BY revision DESC limit %d" % last_num
else:
return ''
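# Illustrative examples (added comment): get_orderby_clause(50) returns
# " ORDER BY revision DESC limit 50", while get_orderby_clause(None) returns ''.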
# ============================================================================================
def get_runtime_data(name='', type='', x_field='revision', last_num=None):
"""Get the test runtime/iteration as a function of an X variable.
Parameters
----------
name :: full name of the test
type :: type of test to filter by
x_field :: name of the field for the X axis.
e.g. 'revision' (default)
or 'date' : exact date/time of launch
or 'index' : using the date, but returning an index of build #
instead of the date (better scaling)
last_num :: only get the last this-many entries from the table, sorted by revision.
                    if None (or negative), then get everything
Returns
-------
x :: list of X values, sorted increasing
y :: list of runtime/iteration for each x
"""
results = get_results(name, type, where_clause='', orderby_clause=get_orderby_clause(last_num))
# Data dict. Key = X variable; Value = (iterations total, runtime total)
data = {}
for res in results:
# Get the x field value
if x_field == 'index':
x = res['date']
else:
x = res[x_field]
        if x in data:
old = data[x]
iters = old[0] + 1 # Iterations
runtime = old[1] + res["runtime"]
else:
iters = 1
runtime = res["runtime"]
# Save the # of iterations and runtime
data[x] = (iters, runtime)
# Now make a sorted list of (x, runtime/iteration)
sorted_list = [(x, y[1] / y[0]) for (x, y) in data.items()]
sorted_list.sort()
x = [a for (a, b) in sorted_list]
# For index, convert into an integer index
if x_field == 'index':
x = range(len(x))
y = [b for (a, b) in sorted_list]
return (x, y)
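# Hedged usage sketch (added comment; the test name below is hypothetical):
#
#   x, y = get_runtime_data(name="SomeSuite.SomePerformanceTest",
#                           x_field="revision", last_num=50)
#
# x then holds the last 50 revisions and y the mean runtime per iteration
# of that test at each revision.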
# ============================================================================================
def get_unique_fields(results, field):
"""Given a list of TestResult, return a
list of all unique values of 'field'"""
out = set()
for res in results:
out.add(res[field])
return list(out)
# ============================================================================================
def get_results_matching(results, field, value):
"""Given a list of TestResult,
return a list of TestResult's where 'field' matches 'value'."""
out = []
for res in results:
if res[field] == value:
out.append(res)
return out
# ============================================================================================
def plot_runtime(annotate, saveImage, path, **kwargs):
name = kwargs['name']
(xData, yData) = get_runtime_data(**kwargs)
trace1 = go.Scatter(x=xData, y=yData,
mode='lines+markers',
marker=dict(
size='5',
color="blue"
)
)
annotations = []
last_num = kwargs.get('last_num', None)
if annotate and not saveImage:
# retrieve commitids for annotation on the plotly graph
results = get_results(name, orderby_clause='ORDER BY revision, variables, date')
commitids = ["""<a href="%s/commit/%s"> </a>""" % (MANTID_ADDRESS, res["commitid"]) for res in results]
if last_num is not None:
commitids = commitids[-last_num:]
for x, y, text in zip(xData, yData, commitids):
annotations.append(
dict(
x=x,
y=y,
text=text,
showarrow=False,
font=dict(family='sans serif', size=10),
xanchor='center',
yanchor='center'
)
)
if last_num is not None:
title = "Runtime History of %s (last %d revs)" % (name, last_num)
else:
title = "Runtime History of %s (all revs)" % name
yAxisTitle = 'Runtime/iteration (sec)'
xAxisTitle = kwargs['x_field']
layout = go.Layout(showlegend=False, annotations=annotations,
title=title,
xaxis=dict(title=xAxisTitle),
yaxis=dict(
title=yAxisTitle,
range=[0, np.amax(yData)]
)
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)
if saveImage:
im_filename = name + ".png"
plt.ioff()
plt.figure()
plt.title(title)
plt.xlabel(xAxisTitle)
plt.ylabel(yAxisTitle)
plt.plot(xData, yData, "-b.")
plt.ylim(ymin=0)
plt.savefig(os.path.join(path, im_filename))
plt.close()
return """<img src="%s"/>""" % im_filename
else:
return offline.plot(fig, output_type='div', show_link=False, auto_open=False, include_plotlyjs=False)
# ============================================================================================
def make_css_file(path):
""" Make a save the report.css file to be used by all html """
default_css = """
table
{
border-collapse:collapse;
background-color:FFAAAA;
}
table, th, td
{
border: 1px solid black;
padding: 2px 6px;
}
.failedrow, .failedrow TD, .failedrow TH
{
background-color:#FFAAAA;
color:black;
}
.alternaterow, .alternaterow TD, .alternaterow TH
{
background-color:#FFFFAA;
color:black;
}
.error
{
color:red;
font-weight: bold;
}
"""
f = open(os.path.join(path, "report.css"), 'w')
f.write(default_css)
f.close()
# ============================================================================================
def make_environment_html(res):
"""Return a HTML string with details of test environment, taken from the
'res' TestResult object"""
html = """<table border=1>
<tr><th>Host name:</th> <td>%s</td> </tr>
<tr><th>Environment:</th> <td>%s</td> </tr>
<tr><th>Type of runner:</th> <td>%s</td> </tr>
</table>
""" % (res['host'], res['environment'], res['runner'])
return html
# ============================================================================================
def make_detailed_html_file(basedir, name, fig1, fig2, last_num):
""" Create a detailed HTML report for the named test """
html = DEFAULT_PLOTLY_HEADER
html += """<h1>Detailed report for %s</h1><br>""" % (name)
html += fig1 + "\n"
html += fig2 + "\n"
html += """<h3>Test Results</h3>"""
fields = ['revision', 'date', 'commitid', 'compare', 'status', 'runtime', 'cpu_fraction', 'variables']
table_row_header = "<tr>"
for field in fields:
if field == "runtime": field = "Runtime/Iter."
field = field[0].upper() + field[1:]
table_row_header += "<th>%s</th>" % field
table_row_header += "</tr>"
html += """<table border="1">""" + table_row_header
table_html = ''
results = get_results(name, type='', where_clause='', orderby_clause="ORDER BY revision, variables, date")
data = [(res["revision"], res["variables"], res["date"], res) for res in results]
count = 0
last_rev = 0
commitid = ''
last_commitid = ''
row_class = ''
table_rows = []
for (rev, variable, date, res) in data:
table_row_html = ''
if (rev != last_rev):
# Changed SVN revision. Swap row color
if row_class == '':
row_class = "class=alternaterow"
else:
row_class = ''
last_rev = rev
if commitid != last_commitid:
last_commitid = commitid
if res["success"]:
table_row_html += "<tr %s>\n" % row_class
else:
table_row_html += "<tr class=failedrow>\n"
for field in fields:
val = ''
if field == 'compare':
# Comparison to previous commit, if anything can be done
if (last_commitid != ""):
val = """<a href="%s/compare/%s...%s">diff</a>""" % (
MANTID_ADDRESS, last_commitid, commitid)
else:
# Normal fields
val = res[field]
# Trim the fractional seconds
if field == "date":
val = str(val)[0:19]
# Add a trac link
if field == "commitid":
commitid = val
partial_commitid = val
                if len(partial_commitid) > 7:
                    partial_commitid = partial_commitid[0:7]
val = """<a href="%s/commit/%s">%s</a>""" % (MANTID_ADDRESS, commitid, partial_commitid)
if field == "runtime":
val = "%.3f" % (res["runtime"])
table_row_html += "<td>%s</td>" % val
table_row_html += "\n</tr>\n"
table_rows.append(table_row_html)
# Now print out all the rows in reverse order
table_rows.reverse()
for row in table_rows:
html += row
# And one more at the end for good measure
html += table_row_header
html += "</table>"
if results:
html += """<h3>Environment</h3>
%s""" % make_environment_html(results[0])
html += DEFAULT_HTML_FOOTER
f = open(os.path.join(basedir, "%s.htm" % name), "w")
html = html.replace("\n", os.linesep) # Fix line endings for windows
f.write(html)
f.close()
# ============================================================================================
def how_long_ago(timestr):
"""Returns a string giving how long ago something happened,
    in a human-friendly way."""
import time
now = datetime.datetime.now()
then = to_datetime(timestr)
td = (now - then)
sec = td.seconds
min = int(sec / 60)
hours = int(min / 60)
days = td.days
weeks = int(days / 7)
sec = sec % 60
min = min % 60
hours = hours % 24
days = days % 7
if weeks > 0:
return "%dw%dd" % (weeks, days)
elif days > 0:
return "%dd%dh" % (days, hours)
elif hours > 0:
return "%dh%dm" % (hours, min)
elif min > 0:
return "%dm%ds" % (min, sec)
else:
return "%ds" % (sec)
return ""
# ============================================================================================
def get_html_summary_table(test_names):
"""Returns a html string summarizing the tests with these names """
html = """
<table ><tr>
<th>Test Name</th>
<th>Type</th>
<th>Status</th>
<th>When?</th>
<th>Total runtime (s)</th>
"""
for name in test_names:
res = sqlresults.get_latest_result(name)
        if res is not None:
# Calculate how long ago
if not res["success"]:
html += """<tr class="failedrow">"""
else:
html += """<tr>"""
html += """<td><a href="%s.htm">%s</a></td>""" % (name, name)
html += """<td>%s</td>""" % res['type']
html += """<td>%s</td>""" % res['status']
# Friendly date
date = to_datetime(res['date'])
html += """<td>%s</td>""" % date.strftime("%b %d, %H:%M:%S")
html += """<td>%s</td>""" % res['runtime']
html += """</tr>"""
html += """</table>"""
return html
# ============================================================================================
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
""" HTML report for a subproject set of tests.
starts_with : the prefix of the test name
Returns: (filename saved, HTML for a page with ALL figures in it)
"""
basedir = os.path.abspath(path)
if not os.path.exists(basedir):
os.mkdir(basedir)
# Detect if you can do figures
dofigs = True
try:
plt.figure()
except:
dofigs = False
# Start the HTML
overview_html = DEFAULT_HTML_HEADER
html = DEFAULT_HTML_HEADER
html += """<h1>Mantid System Tests: %s</h1>""" % starts_with
if not dofigs:
html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""
# ------ Find the test names of interest ----------------
# Limit with only those tests that exist in the latest rev
latest_rev = sqlresults.get_latest_revison()
temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
# Filter by their start
test_names = []
for name in temp_names:
if name.startswith(starts_with):
test_names.append(name)
test_names.sort()
# --------- Table with the summary of latest results --------
html += """<h2>Latest Results Summary</h2>"""
html += get_html_summary_table(test_names)
# -------- Report for each test ------------------------
for name in test_names:
print "Plotting", name
html += """<hr><h2>%s</h2>\n""" % name
overview_html += """<hr><h2>%s</h2>\n""" % name
if dofigs:
# Only the latest X entries
imgTagHtml = plot_runtime(False, True, path, name=name, x_field=x_field, last_num=last_num)
divShort = plot_runtime(True, False, path, name=name, x_field=x_field, last_num=last_num)
# Plot all svn times
divDetailed = plot_runtime(True, False, path, name=name, x_field=x_field, last_num=None)
html += divDetailed + "\n"
overview_html += imgTagHtml + "\n"
make_detailed_html_file(basedir, name, divShort, divDetailed, last_num)
detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
<br><br>
""" % (name, name)
html += detailed_html
overview_html += detailed_html
html += DEFAULT_HTML_FOOTER
overview_html += "</body></html>"
filename = starts_with + ".htm"
f = open(os.path.join(basedir, filename), "w")
html = html.replace("\n", os.linesep) # Fix line endings for windows
f.write(html)
f.close()
return (filename, overview_html)
# ============================================================================================
def generate_html_report(path, last_num, x_field='revision'):
"""Make a comprehensive HTML report of runtime history for all tests.
Parameters
----------
path :: base path to the report folder
last_num :: in the shorter plot, how many SVN revs to show?
x_field :: the field to use as the x-axis. 'revision' or 'date' make sense
"""
basedir = os.path.abspath(path)
if not os.path.exists(basedir):
os.mkdir(basedir)
# Make the CSS file to be used by all HTML
make_css_file(path)
# Detect if you can do figures
dofigs = True
# --------- Start the HTML --------------
html = DEFAULT_HTML_HEADER
html += """<h1>Mantid System Tests Auto-Generated Report</h1>"""
html += """<p><a href="overview_plot.htm">See an overview of performance plots for all tests by clicking here.</a></p> """
if not dofigs:
html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""
html += """<h2>Run Environment</h2>
%s
""" % (make_environment_html(sqlresults.get_latest_result()))
overview_html = ""
# ------ Find the test names of interest ----------------
# Limit with only those tests that exist in the latest rev
latest_rev = sqlresults.get_latest_revison()
test_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
test_names.sort()
# ------ Find a list of subproject names --------
subprojects = set()
for name in test_names:
n = name.find(".")
if n > 0:
subprojects.add(name[:n])
subprojects = list(subprojects)
subprojects.sort()
html += """<h2>Test Subprojects</h2>
<big>
<table cellpadding="10"> """
for subproject in subprojects:
(filename, this_overview) = generate_html_subproject_report(path, last_num, x_field, subproject)
overview_html += this_overview
html += """<tr> <td> <a href="%s">%s</a> </td> </tr>
""" % (filename, subproject)
html += """</table></big>"""
# --------- Table with the summary of latest results --------
html += """<h2>Overall Results Summary</h2>"""
html += get_html_summary_table(test_names)
html += DEFAULT_HTML_FOOTER
f = open(os.path.join(basedir, "report.htm"), "w")
html = html.replace("\n", os.linesep) # Fix line endings for windows
f.write(html)
f.close()
# -------- Overview of plots ------------
f = open(os.path.join(basedir, "overview_plot.htm"), "w")
overview_html = overview_html.replace("\n", os.linesep) # Fix line endings for windows
f.write(overview_html)
f.close()
print "Report complete!"
# ============================================================================================
if __name__ == "__main__":
sqlresults.set_database_filename("MyFakeData.db")
# Make up some test data
if 0:
if os.path.exists("MyFakeData.db"): os.remove("MyFakeData.db")
sqlresults.generate_fake_data(300)
generate_html_report("../Report", 50)
| gpl-3.0 |
jiajiechen/mxnet | example/deep-embedded-clustering/data.py | 16 | 1384 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
data_path = os.path.join(data_path, '../../data')
mnist = fetch_mldata('MNIST original', data_home=data_path)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p].astype(np.float32)*0.02
Y = mnist.target[p]
return X, Y
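# --- Hedged usage sketch (added for illustration, not part of the original example).
# Assumes the fetch_mldata download above succeeds; the shapes in the comments
# reflect the classic 70000-sample MNIST layout.
def _example_get_mnist_usage():
    """Minimal demo of get_mnist(); call explicitly, nothing runs on import."""
    X, Y = get_mnist()
    print("data shape:", X.shape)    # expected (70000, 784)
    print("label shape:", Y.shape)   # expected (70000,)
    return X[:128], Y[:128]          # e.g. a first mini-batch for the clustering example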
| apache-2.0 |
phdowling/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
ajnam12/MusicNLP | train.py | 1 | 5991 | import glob
from hdf5_getters import *
import os
import numpy as np
from collections import Counter
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adagrad, Nadam
from keras.regularizers import l2, l1
import music_utils
from music_utils import *
from graph_utils import *
import cPickle as pickle
import random
import sklearn.metrics  # explicit submodule import; sklearn.metrics.confusion_matrix is used below
random.seed(221)
genres = ['jazz', 'hip hop rnb and dance hall', 'folk', 'rock']
genre_idxs = dict(zip(genres, range(len(genres))))
### NN MODELS
def vanilla_model(input_dim, output_dim, hidden_dim=200, num_layers=1, reg=0.05, non_linearity='tanh'):
model = Sequential()
model.add(Dense(hidden_dim, input_dim=input_dim, W_regularizer=l2(reg), init='glorot_normal'))
model.add(Activation(non_linearity))
model.add(Dropout(.5))
for i in range(1,num_layers):
model.add(Dense(hidden_dim, W_regularizer=l2(reg), init='glorot_normal'))
model.add(Activation(non_linearity))
model.add(Dropout(.5))
model.add(Dense(output_dim, W_regularizer=l2(reg), init='glorot_normal'))
model.add(Activation('softmax'))
return model
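# --- Hedged usage sketch (added for illustration; the dimensions and data below
# are arbitrary placeholders, not values taken from this project's datasets).
def _example_vanilla_model_usage(input_dim=100, output_dim=4):
    """Builds and compiles a small vanilla_model on synthetic data (Keras 1.x API)."""
    demo_x = np.random.rand(32, input_dim)
    demo_y = np.eye(output_dim)[np.random.randint(0, output_dim, 32)]
    model = vanilla_model(input_dim, output_dim, hidden_dim=50, num_layers=2, reg=0.01)
    model.compile(loss='categorical_crossentropy', optimizer=Adagrad(), metrics=['accuracy'])
    model.fit(demo_x, demo_y, nb_epoch=1, batch_size=8, verbose=0)
    return model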
def make_train_example(h5, feature_extractor):
song = convert_raw_to_word(h5)
return feature_extractor(song)
#pitches = get_segments_pitches(h5)[:11] # limit: only look at beginning
#pitch_diffs = [pitches[i] - pitches[i - 1] for i in xrange(1, len(pitches))]
#return {'title': title, 'pitch_diffs': pitch_diffs}
### MAKE DATASET
"""
genres = ['rock', 'punk', 'folk', 'hip hop rnb and dance hall']
genre_idxs = dict(zip(genres, range(len(genres))))
genre_songs = {'rock': [], 'punk': [], 'folk': [], 'hip hop rnb and dance hall': []}
tags_list = []
data_path = "~/MillionSongSubset/data"
for root, dirs, files in os.walk("MillionSongSubset/data"):
files = glob.glob(os.path.join(root, '*h5'))
for f in files:
h5 = open_h5_file_read(f)
tags = get_artist_mbtags(h5).tolist()
tags_list += tags
for tag in tags:
if tag in genre_songs:
genre_songs[tag].append(make_train_example(get_segments_pitches(h5),fe))
#h5.close()
#exit()
break
h5.close()
#train_pair = make_train_pair(h5)
#titles.append(train_pair['title'])
#pitch_diff_list.append(train_pair['pitch_diffs'])
"""
def load_dataset(from_full_dict=True):
train_data = {}
test_data = {}
if not from_full_dict:
with open('../train.p', 'rb') as f:
train_data = pickle.load(f)
with open('../test.p', 'rb') as f:
test_data = pickle.load(f)
else:
with open('10k_genre_dict.pkl', 'rb') as f:
data = pickle.load(f)
for k,v in data.iteritems():
test_data[k] = v[:1000]
train_data[k] = v[1000:]
return train_data, test_data
def make_dataset(train_data, test_data, feature_extractor, genres, genre_idxs):
#train_data, test_data = load_dataset()
print "data loaded"
x_train = []
y_train = []
x_test = []
y_test = []
print "extracting train data"
for k,v in train_data.iteritems():
print "- extracting %s"%k
y_train += [make_one_hot(genre_idxs[k])]*len(v)
for song in v:
feats = feature_extractor(song)
x_train.append(feats)
print "extracting test data"
for k,v in test_data.iteritems():
print "- extracting %s"%k
y_test += [make_one_hot(genre_idxs[k])]*len(v)
for song in v:
feats = feature_extractor(song)
x_test.append(feats)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = np.hstack((x_train,y_train))
np.random.shuffle(x_train)
x_train, y_train = np.hsplit(x_train, [-4])
print x_train.shape
print y_train.shape
print x_test.shape
print y_test.shape
return x_train, y_train, x_test, y_test
# genres = ['jazz', 'hip hop rnb and dance hall', 'folk', 'rock']
# genre_idxs = dict(zip(genres, range(len(genres))))
# fe = trigrams
# x_train, y_train, x_test, y_test = make_dataset(train_data, test_data, fe, genres, genre_idxs)
# vec_size = 12**3 + 21**2
### TRAIN MODEL
#def train_model(train_data, test_data):
def train_model(x_train, y_train, x_test, y_test, vec_size):
genres = ['jazz', 'hip hop rnb and dance hall', 'folk', 'rock']
genre_idxs = dict(zip(genres, range(len(genres))))
print x_train.shape
print x_test.shape
regs = [0.005, 0.01, 0.05]
lrs = [0.01, 0.05, 0.1]
num_layers = [1, 2, 3]
results = {}
idx = 0
tot = len(regs)*len(lrs)*len(num_layers)
for num_layer in num_layers:
for lr in lrs:
for reg in regs:
print "Combination %d/%d" % (idx, tot)
model = vanilla_model(vec_size, len(genres), num_layers=num_layer, reg=reg, non_linearity='relu')
opt = Adagrad()
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
print "Training..."
model.fit(x_train, y_train, validation_data=(x_test, y_test), nb_epoch=40,batch_size=50)
#model.fit(x_train, y_train, nb_epoch=10,batch_size=50)
print "Testing..."
score = model.evaluate(x_test,y_test,batch_size=1)
preds = model.predict(x_test,batch_size=1,verbose=1)
results[(num_layer, lr, reg)] = (score, preds)
#print score
idx += 1
return (y_test, results)
### GRAPH RESULTS
def create_confusion_matrix(ground_truths, predictions):
gts = np.argmax(ground_truths, axis=1)
preds = np.argmax(predictions, axis=1)
print ground_truths.shape[0]
print gts.shape[0]
print predictions.shape[0]
print preds.shape[0]
conf_mat = sklearn.metrics.confusion_matrix(gts, preds)
plot_confusion_matrix(conf_mat, genres, 'test')
print 'bottom'
| mit |
zack3241/incubator-airflow | airflow/contrib/operators/hive_to_dynamodb.py | 15 | 3701 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
    Moves data from Hive to DynamoDB. Note that, for now, the data is loaded
    into memory before being pushed to DynamoDB, so this operator should only
    be used for small amounts of data.
:param sql: SQL query to execute against the hive database
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
logging.info('Extracting data from Hive')
logging.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name, table_keys=self.table_keys, region_name=self.region_name)
logging.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data, args=self.pre_process_args, kwargs=self.pre_process_kwargs))
logging.info('Done.')
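# --- Hedged usage sketch (added for illustration). The DAG id, SQL, DynamoDB
# table and keys below are made-up placeholders, not values defined in this module.
def _example_hive_to_dynamodb_task():
    """Shows how this operator might be wired into a DAG; call explicitly."""
    from datetime import datetime
    from airflow import DAG
    demo_dag = DAG('hive_to_dynamodb_example',
                   start_date=datetime(2017, 1, 1),
                   schedule_interval=None)
    return HiveToDynamoDBTransferOperator(
        task_id='hive_features_to_dynamodb',
        sql='SELECT feature_id, feature_value FROM hive_feature_table',
        table_name='demo_feature_store',
        table_keys=['feature_id'],
        region_name='us-east-1',
        dag=demo_dag)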
| apache-2.0 |
voxlol/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
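# --- Hedged illustration (added): shows the filename normalisation on a
# made-up dataset name; the expected value is given in the comment.
def _example_mldata_filename():
    # returns 'whistler-daily-snowfall-2010': lower-cased, spaces to dashes, '().' stripped
    return mldata_filename("Whistler Daily Snowfall (2010)")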
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
architecture-building-systems/CEAforArcGIS | cea/datamanagement/archetypes_mapper.py | 1 | 19240 | """
building properties algorithm
"""
# HISTORY:
# J. A. Fonseca script development 22.03.15
import warnings
import numpy as np
import pandas as pd
from typing import List, Tuple
import cea.config
import cea.inputlocator
from cea import InvalidOccupancyNameException
from cea.datamanagement.schedule_helper import calc_mixed_schedule
from cea.utilities.dbf import dbf_to_dataframe, dataframe_to_dbf
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Daren Thomas", "Martin Mosteiro"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def archetypes_mapper(locator,
update_architecture_dbf,
update_air_conditioning_systems_dbf,
update_indoor_comfort_dbf,
update_internal_loads_dbf,
update_supply_systems_dbf,
update_schedule_operation_cea,
buildings):
"""
    Algorithm to query building properties from the statistical database
    Archetypes_HVAC_properties.csv. For more info, check the integrated demand
    model of Fonseca et al. 2015, Appl. Energy.
:param InputLocator locator: an InputLocator instance set to the scenario to work on
:param boolean update_architecture_dbf: if True, update the construction and architecture properties.
:param boolean update_indoor_comfort_dbf: if True, get properties about thermal comfort.
:param boolean update_air_conditioning_systems_dbf: if True, get properties about types of HVAC systems, otherwise False.
:param boolean update_internal_loads_dbf: if True, get properties about internal loads, otherwise False.
The following files are created by this script, depending on which flags were set:
    - building_HVAC.dbf
        describes the queried properties of HVAC systems
    - architecture.dbf
        describes the queried properties of architectural features
    - building_thermal.shp
        describes the queried thermal properties of buildings
    - indoor_comfort.shp
        describes the queried indoor comfort properties of buildings
"""
# get occupancy and age files
building_typology_df = dbf_to_dataframe(locator.get_building_typology())
# validate list of uses in case study
list_uses = get_list_of_uses_in_case_study(building_typology_df)
# get occupant densities from archetypes schedules
occupant_densities = {}
occ_densities = pd.read_excel(locator.get_database_use_types_properties(), 'INTERNAL_LOADS').set_index('code')
for use in list_uses:
if occ_densities.loc[use, 'Occ_m2pax'] > 0.0:
occupant_densities[use] = 1 / occ_densities.loc[use, 'Occ_m2pax']
else:
occupant_densities[use] = 0.0
# get properties about the construction and architecture
if update_architecture_dbf:
architecture_mapper(locator, building_typology_df)
# get properties about types of HVAC systems
if update_air_conditioning_systems_dbf:
aircon_mapper(locator, building_typology_df)
if update_indoor_comfort_dbf:
indoor_comfort_mapper(list_uses, locator, occupant_densities, building_typology_df)
if update_internal_loads_dbf:
internal_loads_mapper(list_uses, locator, occupant_densities, building_typology_df)
if update_schedule_operation_cea:
calc_mixed_schedule(locator, building_typology_df, buildings)
if update_supply_systems_dbf:
supply_mapper(locator, building_typology_df)
def indoor_comfort_mapper(list_uses, locator, occupant_densities, building_typology_df):
comfort_DB = pd.read_excel(locator.get_database_use_types_properties(), 'INDOOR_COMFORT')
# define comfort
prop_comfort_df = building_typology_df.merge(comfort_DB, left_on='1ST_USE', right_on='code')
# write to shapefile
fields = ['Name',
'Tcs_set_C',
'Ths_set_C',
'Tcs_setb_C',
'Ths_setb_C',
'Ve_lpspax',
'RH_min_pc',
'RH_max_pc']
prop_comfort_df_merged = calculate_average_multiuse(fields,
prop_comfort_df,
occupant_densities,
list_uses,
comfort_DB)
dataframe_to_dbf(prop_comfort_df_merged[fields], locator.get_building_comfort())
def internal_loads_mapper(list_uses, locator, occupant_densities, building_typology_df):
internal_DB = pd.read_excel(locator.get_database_use_types_properties(), 'INTERNAL_LOADS')
# define comfort
prop_internal_df = building_typology_df.merge(internal_DB, left_on='1ST_USE', right_on='code')
# write to shapefile
fields = ['Name',
'Occ_m2pax',
'Qs_Wpax',
'X_ghpax',
'Ea_Wm2',
'El_Wm2',
'Ed_Wm2',
'Ev_kWveh',
'Qcre_Wm2',
'Vww_lpdpax',
'Vw_lpdpax',
'Qhpro_Wm2',
'Qcpro_Wm2',
'Epro_Wm2']
prop_internal_df_merged = calculate_average_multiuse(fields,
prop_internal_df,
occupant_densities,
list_uses,
internal_DB)
dataframe_to_dbf(prop_internal_df_merged[fields], locator.get_building_internal())
def supply_mapper(locator, building_typology_df):
supply_DB = pd.read_excel(locator.get_database_construction_standards(), 'SUPPLY_ASSEMBLIES')
prop_supply_df = building_typology_df.merge(supply_DB, left_on='STANDARD', right_on='STANDARD')
fields = ['Name',
'type_cs',
'type_hs',
'type_dhw',
'type_el']
dataframe_to_dbf(prop_supply_df[fields], locator.get_building_supply())
def aircon_mapper(locator, typology_df):
air_conditioning_DB = pd.read_excel(locator.get_database_construction_standards(), 'HVAC_ASSEMBLIES')
# define HVAC systems types
prop_HVAC_df = typology_df.merge(air_conditioning_DB, left_on='STANDARD', right_on='STANDARD')
# write to shapefile
fields = ['Name',
'type_cs',
'type_hs',
'type_dhw',
'type_ctrl',
'type_vent',
'heat_starts',
'heat_ends',
'cool_starts',
'cool_ends']
dataframe_to_dbf(prop_HVAC_df[fields], locator.get_building_air_conditioning())
def architecture_mapper(locator, typology_df):
architecture_DB = pd.read_excel(locator.get_database_construction_standards(), 'ENVELOPE_ASSEMBLIES')
prop_architecture_df = typology_df.merge(architecture_DB, left_on='STANDARD', right_on='STANDARD')
fields = ['Name',
'Hs_ag',
'Hs_bg',
'Ns',
'Es',
'void_deck',
'wwr_north',
'wwr_west',
'wwr_east',
'wwr_south',
'type_cons',
'type_leak',
'type_floor',
'type_part',
'type_base',
'type_roof',
'type_wall',
'type_win',
'type_shade']
dataframe_to_dbf(prop_architecture_df[fields], locator.get_building_architecture())
def get_list_of_uses_in_case_study(building_typology_df):
"""
    Validates the list of uses in the case study.
    Refactored from the archetypes_mapper function.
:param building_typology_df: dataframe of occupancy.dbf input (can be read in archetypes-mapper or in building-properties)
:type building_typology_df: pandas.DataFrame
:return: list of uses in case study
:rtype: pandas.DataFrame.Index
"""
list_var_names = ["1ST_USE", '2ND_USE', '3RD_USE']
list_var_values = ["1ST_USE_R", '2ND_USE_R', '3RD_USE_R']
# validate list of uses
list_uses = []
n_records = building_typology_df.shape[0]
for row in range(n_records):
for var_name, var_value in zip(list_var_names,list_var_values):
if building_typology_df.loc[row, var_value] > 0.0:
list_uses.append(building_typology_df.loc[row, var_name]) # append valid uses
unique_uses = list(set(list_uses))
return unique_uses
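# --- Hedged illustration (added, not part of the original module): builds a
# minimal made-up typology table and lists the uses found in it. The use-type
# codes are arbitrary examples, not entries from the CEA databases.
def _example_get_list_of_uses():
    demo_typology = pd.DataFrame({'1ST_USE': ['OFFICE', 'RETAIL'], '1ST_USE_R': [0.7, 1.0],
                                  '2ND_USE': ['RETAIL', 'OFFICE'], '2ND_USE_R': [0.3, 0.0],
                                  '3RD_USE': ['PARKING', 'PARKING'], '3RD_USE_R': [0.0, 0.0]})
    return get_list_of_uses_in_case_study(demo_typology)  # e.g. ['OFFICE', 'RETAIL']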
def calc_code(code1, code2, code3, code4):
return str(code1) + str(code2) + str(code3) + str(code4)
def calc_mainuse(uses_df, uses):
"""
Calculate a building's main use
:param uses_df: DataFrame containing the share of each building that corresponds to each occupancy type
:type uses_df: DataFrame
:param uses: list of building uses actually available in the area
:type uses: list
:return mainuse: array containing each building's main occupancy
:rtype mainuse: ndarray
"""
# print a warning if there are equal shares of more than one "main" use
# check if 'Name' is already the index, this is necessary because the function is used in data-helper
# and in building properties
if uses_df.index.name not in ['Name']:
# this is the behavior in data-helper
indexed_df = uses_df.set_index('Name')
else:
# this is the behavior in building-properties
indexed_df = uses_df.copy()
uses_df = uses_df.reset_index()
for building in indexed_df.index:
mainuses = [use for use in uses if
(indexed_df.loc[building, use] == indexed_df.max(axis=1)[building]) and (use != 'PARKING')]
if len(mainuses) > 1:
print("%s has equal share of %s; the construction properties and systems for %s will be used." % (
building, ' and '.join(mainuses), mainuses[0]))
# get array of main use for each building
databaseclean = uses_df[uses].transpose()
array_max = np.array(databaseclean[databaseclean[:] > 0].idxmax(skipna=True), dtype='S10')
for i in range(len(array_max)):
if databaseclean[i][array_max[i]] != 1:
databaseclean[i][array_max[i]] = 0
array_second = np.array(databaseclean[databaseclean[:] > 0].idxmax(skipna=True), dtype='S10')
    mainuse = np.array(list(map(calc_comparison, array_second, array_max)))  # list() keeps this valid where map() returns an iterator (Python 3)
return mainuse
def calc_comparison(array_second, array_max):
if array_max == 'PARKING':
if array_second != 'PARKING':
array_max = array_second
return array_max
def correct_archetype_areas(prop_architecture_df, architecture_DB, list_uses):
"""
Corrects the heated area 'Hs_ag' and 'Hs_bg' for buildings with multiple uses.
:var prop_architecture_df: DataFrame containing each building's occupancy, construction and renovation data as
well as the architectural properties obtained from the archetypes.
:type prop_architecture_df: DataFrame
:var architecture_DB: architecture database for each archetype
:type architecture_DB: DataFrame
:var list_uses: list of all occupancy types in the project
:type list_uses: list[str]
:return: Hs_ag_list, Hs_bg_list, Ns_list, Es_list: the corrected values for 'Hs_ag', 'Hs_bg', 'Ns' and 'Es' for each
building
:rtype Tuple[List[float], List[float], List[float], List[float]]
"""
indexed_DB = architecture_DB.set_index('Code')
# weighted average of values
def calc_average(last, current, share_of_use):
return last + current * share_of_use
Hs_ag_list = []
Hs_bg_list = []
Ns_list = []
Es_list = []
for building in prop_architecture_df.index:
Hs_ag = 0.0
Hs_bg = 0.0
Ns = 0.0
Es = 0.0
for use in list_uses:
# if the use is present in the building, find the building archetype properties for that use
if prop_architecture_df[use][building] > 0.0:
# get archetype code for the current occupancy type
current_use_code = use + str(prop_architecture_df['year_start'][building]) + \
str(prop_architecture_df['year_end'][building]) + \
str(prop_architecture_df['standard'][building])
# recalculate heated floor area as an average of the archetype value for each occupancy type in the
# building
Hs_ag = calc_average(Hs_ag, indexed_DB['Hs_ag'][current_use_code], prop_architecture_df[use][building])
Hs_bg = calc_average(Hs_bg, indexed_DB['Hs_bg'][current_use_code], prop_architecture_df[use][building])
Ns = calc_average(Ns, indexed_DB['Ns'][current_use_code], prop_architecture_df[use][building])
Es = calc_average(Es, indexed_DB['Es'][current_use_code], prop_architecture_df[use][building])
Hs_ag_list.append(Hs_ag)
Hs_bg_list.append(Hs_bg)
Ns_list.append(Ns)
Es_list.append(Es)
return Hs_ag_list, Hs_bg_list, Ns_list, Es_list
def get_prop_architecture(typology_df, architecture_DB):
"""
This function obtains every building's architectural properties based on the construction and renovation years.
:param typology_df: DataFrame containing each building's construction and renovation categories for each building
component based on the construction and renovation years
:type typology_df: DataFrame
:param architecture_DB: DataFrame containing the archetypal architectural properties for each use type, construction
and renovation year
    :type architecture_DB: DataFrame
:return prop_architecture_df: DataFrame containing the architectural properties of each building in the area
:rtype prop_architecture_df: DataFrame
"""
# create prop_architecture_df based on the construction categories and archetype architecture database
prop_architecture_df = typology_df.merge(architecture_DB, left_on='STANDARD', right_on='STANDARD')
return prop_architecture_df
def calculate_average_multiuse(fields, properties_df, occupant_densities, list_uses, properties_DB, list_var_names=None,
list_var_values=None):
"""
This script calculates the average internal loads and ventilation properties for multiuse buildings.
:param properties_df: DataFrame containing the building's occupancy type and the corresponding indoor comfort
properties or internal loads.
:type properties_df: DataFrame
:param occupant_densities: DataFrame containing the number of people per square meter for each occupancy type based
on the archetypes
:type occupant_densities: Dict
:param list_uses: list of uses in the project
:type list_uses: list[str]
:param properties_DB: DataFrame containing each occupancy type's indoor comfort properties or internal loads based
on the corresponding archetypes
:type properties_DB: DataFrame
    :param list_var_names: List of column names in properties_df that contain the names of use-types being calculated
    :type list_var_names: list[str]
    :param list_var_values: List of column names in properties_df that contain values of the use-type ratio with respect to list_var_names
    :type list_var_values: list[str]
:return properties_df: the same DataFrame as the input parameter, but with the updated properties for multiuse
buildings
"""
if list_var_names is None:
list_var_names = ["1ST_USE", '2ND_USE', '3RD_USE']
if list_var_values is None:
list_var_values = ["1ST_USE_R", '2ND_USE_R', '3RD_USE_R']
properties_DB = properties_DB.set_index('code')
for column in fields:
if column in ['Ve_lpspax', 'Qs_Wpax', 'X_ghpax', 'Vww_lpdpax', 'Vw_lpdpax']:
# some properties are imported from the Excel files as int instead of float
properties_df[column] = properties_df[column].astype(float)
for building in properties_df.index:
column_total = 0
people_total = 0
for use in list_uses:
for var_name, var_value in zip(list_var_names, list_var_values):
if use in [properties_df[var_name][building]]:
column_total += (properties_df[var_value][building]
* occupant_densities[use]
* properties_DB[column][use])
people_total += properties_df[var_value][building] * occupant_densities[use]
if people_total > 0.0:
properties_df.loc[building, column] = column_total / people_total
else:
properties_df.loc[building, column] = 0
elif column in ['Ea_Wm2', 'El_Wm2', 'Epro_Wm2', 'Qcre_Wm2', 'Ed_Wm2', 'Qhpro_Wm2', 'Qcpro_Wm2', 'Occ_m2pax']:
for building in properties_df.index:
average = 0.0
for use in list_uses:
for var_name, var_value in zip(list_var_names, list_var_values):
if use in [properties_df[var_name][building]]:
average += properties_df[var_value][building] * properties_DB[column][use]
properties_df.loc[building, column] = average
return properties_df
def main(config):
"""
    Run the archetypes mapper on the scenario defined in the configuration, mapping the input databases selected
    there (architecture, air-conditioning, comfort, internal loads, supply and schedules) onto the building properties.
"""
print('Running archetypes-mapper with scenario = %s' % config.scenario)
update_architecture_dbf = 'architecture' in config.archetypes_mapper.input_databases
update_air_conditioning_systems_dbf = 'air-conditioning' in config.archetypes_mapper.input_databases
update_indoor_comfort_dbf = 'comfort' in config.archetypes_mapper.input_databases
update_internal_loads_dbf = 'internal-loads' in config.archetypes_mapper.input_databases
update_supply_systems_dbf = 'supply' in config.archetypes_mapper.input_databases
update_schedule_operation_cea = 'schedules' in config.archetypes_mapper.input_databases
buildings = config.archetypes_mapper.buildings
locator = cea.inputlocator.InputLocator(config.scenario)
archetypes_mapper(locator=locator,
update_architecture_dbf=update_architecture_dbf,
update_air_conditioning_systems_dbf=update_air_conditioning_systems_dbf,
update_indoor_comfort_dbf=update_indoor_comfort_dbf,
update_internal_loads_dbf=update_internal_loads_dbf,
update_supply_systems_dbf=update_supply_systems_dbf,
update_schedule_operation_cea=update_schedule_operation_cea,
buildings=buildings)
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
YihaoLu/statsmodels | statsmodels/datasets/utils.py | 25 | 10983 | from statsmodels.compat.python import (range, StringIO, urlopen,
HTTPError, URLError, lrange,
cPickle, urljoin, BytesIO)
import sys
import shutil
from os import environ
from os import makedirs
from os.path import expanduser
from os.path import exists
from os.path import join
import numpy as np
from numpy import array
from pandas import read_csv, DataFrame, Index
def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):
"""
Download and return an example dataset from Stata.
Parameters
----------
data : str
Name of dataset to fetch.
baseurl : str
The base URL to the stata datasets.
as_df : bool
If True, returns a `pandas.DataFrame`
Returns
-------
dta : Record Array
A record array containing the Stata dataset.
Examples
--------
>>> dta = webuse('auto')
Notes
-----
    Make sure baseurl has a trailing forward slash. No error checking is done
    on response URLs.
"""
# lazy imports
from statsmodels.iolib import genfromdta
url = urljoin(baseurl, data+'.dta')
dta = urlopen(url)
dta = BytesIO(dta.read()) # make it truly file-like
if as_df: # could make this faster if we don't process dta twice?
return DataFrame.from_records(genfromdta(dta))
else:
return genfromdta(dta)
class Dataset(dict):
def __init__(self, **kw):
# define some default attributes, so pylint can find them
self.endog = None
self.exog = None
self.data = None
self.names = None
dict.__init__(self, kw)
self.__dict__ = self
# Some datasets have string variables. If you want a raw_data
# attribute you must create this in the dataset's load function.
try: # some datasets have string variables
self.raw_data = self.data.view((float, len(self.names)))
except:
pass
def __repr__(self):
return str(self.__class__)
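# --- Hedged illustration (added): the Dataset container exposes its keys both
# as dict entries and as attributes. The arrays below are arbitrary examples.
def _example_dataset_container():
    demo = Dataset(data=None, names=['x'], endog=np.zeros(3), exog=np.ones((3, 1)))
    return demo.endog.shape, demo['exog'].shape   # ((3,), (3, 1))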
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
names = list(data.dtype.names)
if isinstance(endog_idx, int):
endog = array(data[names[endog_idx]], dtype=dtype)
endog_name = names[endog_idx]
endog_idx = [endog_idx]
else:
endog_name = [names[i] for i in endog_idx]
if stack:
endog = np.column_stack(data[field] for field in endog_name)
else:
endog = data[endog_name]
if exog_idx is None:
exog_name = [names[i] for i in range(len(names))
if i not in endog_idx]
else:
exog_name = [names[i] for i in exog_idx]
if stack:
exog = np.column_stack(data[field] for field in exog_name)
else:
exog = data[exog_name]
if dtype:
endog = endog.astype(dtype)
exog = exog.astype(dtype)
dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
index_idx=None):
data = DataFrame(data, dtype=dtype)
names = data.columns
if isinstance(endog_idx, int):
endog_name = names[endog_idx]
endog = data[endog_name]
if exog_idx is None:
exog = data.drop([endog_name], axis=1)
else:
exog = data.filter(names[exog_idx])
else:
endog = data.ix[:, endog_idx]
endog_name = list(endog.columns)
if exog_idx is None:
exog = data.drop(endog_name, axis=1)
elif isinstance(exog_idx, int):
exog = data.filter([names[exog_idx]])
else:
exog = data.filter(names[exog_idx])
if index_idx is not None: # NOTE: will have to be improved for dates
endog.index = Index(data.ix[:, index_idx])
exog.index = Index(data.ix[:, index_idx])
data = data.set_index(names[index_idx])
exog_name = list(exog.columns)
dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def _maybe_reset_index(data):
"""
All the Rdatasets have the integer row.labels from R if there is no
real index. Strip this for a zero-based index
"""
if data.index.equals(Index(lrange(1, len(data) + 1))):
data = data.reset_index(drop=True)
return data
def _get_cache(cache):
if cache is False:
# do not do any caching or load from cache
cache = None
elif cache is True: # use default dir for cache
cache = get_data_home(None)
else:
cache = get_data_home(cache)
return cache
def _cache_it(data, cache_path):
if sys.version_info[0] >= 3:
# for some reason encode("zip") won't work for me in Python 3?
import zlib
# use protocol 2 so can open with python 2.x if cached in 3.x
open(cache_path, "wb").write(zlib.compress(cPickle.dumps(data,
protocol=2)))
else:
open(cache_path, "wb").write(cPickle.dumps(data).encode("zip"))
def _open_cache(cache_path):
if sys.version_info[0] >= 3:
# NOTE: don't know why but decode('zip') doesn't work on my
# Python 3 build
import zlib
data = zlib.decompress(open(cache_path, 'rb').read())
# return as bytes object encoded in utf-8 for cross-compat of cached
data = cPickle.loads(data).encode('utf-8')
else:
data = open(cache_path, 'rb').read().decode('zip')
data = cPickle.loads(data)
return data
def _urlopen_cached(url, cache):
"""
Tries to load data from cache location otherwise downloads it. If it
downloads the data and cache is not None then it will put the downloaded
data in the cache path.
"""
from_cache = False
if cache is not None:
cache_path = join(cache,
url.split("://")[-1].replace('/', ',') + ".zip")
try:
data = _open_cache(cache_path)
from_cache = True
except:
pass
# not using the cache or didn't find it in cache
if not from_cache:
data = urlopen(url).read()
if cache is not None: # then put it in the cache
_cache_it(data, cache_path)
return data, from_cache
def _get_data(base_url, dataname, cache, extension="csv"):
url = base_url + (dataname + ".%s") % extension
try:
data, from_cache = _urlopen_cached(url, cache)
except HTTPError as err:
if '404' in str(err):
raise ValueError("Dataset %s was not found." % dataname)
else:
raise err
data = data.decode('utf-8', 'strict')
return StringIO(data), from_cache
def _get_dataset_meta(dataname, package, cache):
# get the index, you'll probably want this cached because you have
# to download info about all the data to get info about any of the data...
index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
"datasets.csv")
data, _ = _urlopen_cached(index_url, cache)
# Python 3
if sys.version[0] == '3': # pragma: no cover
data = data.decode('utf-8', 'strict')
index = read_csv(StringIO(data))
idx = np.logical_and(index.Item == dataname, index.Package == package)
dataset_meta = index.ix[idx]
return dataset_meta["Title"].item()
def get_rdataset(dataname, package="datasets", cache=False):
"""download and return R dataset
Parameters
----------
dataname : str
The name of the dataset you want to download
package : str
The package in which the dataset is found. The default is the core
'datasets' package.
cache : bool or str
If True, will download this data into the STATSMODELS_DATA folder.
The default location is a folder called statsmodels_data in the
user home folder. Otherwise, you can specify a path to a folder to
use for caching the data. If False, the data will not be cached.
Returns
-------
dataset : Dataset instance
A `statsmodels.data.utils.Dataset` instance. This objects has
attributes::
* data - A pandas DataFrame containing the data
* title - The dataset title
* package - The package from which the data came
* from_cache - Whether not cached data was retrieved
* __doc__ - The verbatim R documentation.
Notes
-----
    If the R dataset has an integer index, this is reset to be zero-based.
    Otherwise the index is preserved. The caching facilities are dumb. That
    is, no download dates, e-tags, or other identifying information
is checked to see if the data should be downloaded again or not. If the
dataset is in the cache, it's used.
"""
# NOTE: use raw github bc html site might not be most up to date
data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/csv/"+package+"/")
docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/doc/"+package+"/rst/")
cache = _get_cache(cache)
data, from_cache = _get_data(data_base_url, dataname, cache)
data = read_csv(data, index_col=0)
data = _maybe_reset_index(data)
title = _get_dataset_meta(dataname, package, cache)
doc, _ = _get_data(docs_base_url, dataname, cache, "rst")
return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
from_cache=from_cache)
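# --- Hedged usage sketch (added for illustration): downloads the 'Duncan'
# dataset from the 'car' package of the Rdatasets mirror. Needs network access;
# the dataset/package names here are examples only.
def _example_get_rdataset():
    duncan = get_rdataset("Duncan", package="car", cache=False)
    print(duncan.title)         # title parsed from the Rdatasets index
    return duncan.data.head()   # pandas DataFrame with the first rows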
# The below function were taken from sklearn
def get_data_home(data_home=None):
"""Return the path of the statsmodels data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'statsmodels_data'
in the user home folder.
Alternatively, it can be set by the 'STATSMODELS_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('STATSMODELS_DATA',
join('~', 'statsmodels_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def check_internet():
"""Check if internet is available"""
try:
urlopen("https://github.com")
except URLError as err:
return False
return True
| bsd-3-clause |
afrendeiro/pipelines | pipelines/pipelines.py | 1 | 16334 | #!/usr/bin/env python
"""
pipelines
=========
Project management and Sample loop.
"""
from argparse import ArgumentParser
from .models import Project
from . import toolkit as tk
import cPickle as pickle
import os
import pandas as pd
import sys
import textwrap
import time
__author__ = "Andre Rendeiro"
__copyright__ = "Copyright 2015, Andre Rendeiro"
__credits__ = []
__license__ = "GPL2"
__version__ = "0.1"
__maintainer__ = "Andre Rendeiro"
__email__ = "[email protected]"
__status__ = "Development"
def main():
# Parse command-line arguments
parser = ArgumentParser(
prog="pipelines",
description="pipelines. Project management and sample loop."
)
parser = add_args(parser)
# Parse
args = parser.parse_args()
# Start project
prj = Project(args.project_name)
prj.addSampleSheet(args.csv)
# Start main function
if args.stats:
read_stats(prj)
elif args.compare:
compare()
else:
sample_loop(args, prj)
# Exit
print("Finished and exiting.")
sys.exit(0)
def add_args(parser):
"""
Options for project and pipelines.
"""
# Project
parser.add_argument(dest="project_name", help="Project name.", type=str)
parser.add_argument(dest="csv", help="CSV file with sample annotation.", type=str) # improvement: check project dirs for csv
# Behaviour
parser.add_argument("--stats", dest="stats", action="store_true",
help="Do not run pipelines, but gather stats on produced files.")
parser.add_argument("--compare", dest="compare", action="store_true",
help="Do not loop through samples, but perform comparisons betweem them.")
parser.add_argument("-r", "--rm-tmp", dest="rm_tmp", action="store_true",
help="Remove intermediary files. If not it will preserve all intermediary files. Default=False.")
parser.add_argument("--dry-run", dest="dry_run", action="store_true",
help="Dry run. Assemble commands, but do not submit jobs to slurm. Default=False.")
parser.add_argument("--no-checks", dest="checks", action="store_false",
help="Don't check file existence and integrity. Default=False.")
# Pypiper
parser.add_argument("--overwrite", dest="recover", action="store_true",
help="Overwrite existing files. Default=False.")
parser.add_argument("--fresh-start", dest="fresh", action="store_true",
help="Start from beginning of pipeline. Default=False.")
parser.add_argument("--manual-clean", dest="manual_clean", action="store_true",
help="Manually clean temporary files. Default=False.")
# Slurm-related
parser.add_argument("-c", "--cpus", default=4, dest="cpus",
help="Number of CPUs to use. Default is specified in the pipeline config file.", type=int)
parser.add_argument("-m", "--mem-per-cpu", default=4000, dest="mem",
help="Memory per CPU to use. Default is specified in the pipeline config file.", type=int)
parser.add_argument("-q", "--queue", default="shortq", dest="queue",
choices=["develop", "shortq", "mediumq", "longq"],
help="Queue to submit jobs to. Default is specified in the pipeline config file.", type=str)
parser.add_argument("-t", "--time", default="10:00:00", dest="time",
help="Maximum time for jobs to run. Default is specified in the pipeline config file.", type=str)
parser.add_argument("-u", "--user-mail", default="[email protected]", dest="user_mail",
help="User email.", type=str)
# Preprocessing: trimming, mapping, etc...
parser.add_argument("--trimmer", default="skewer", choices=["trimmomatic", "skewer"],
dest="trimmer", help="Trimmer to use. Default=skewer.", type=str)
parser.add_argument("-i", "--max-insert-size", default=2000,
dest="maxinsert",
help="Maximum allowed insert size allowed for paired end mates. Default=2000.",
type=int)
parser.add_argument("-Q", "--quality", default=30,
dest="quality",
help="Minimum read quality to keep. Default=30.",
type=int)
# Further downstream
parser.add_argument("--window-size", default=1000, dest="windowsize",
help="Window size used for genome-wide correlations. Default=1000.",
type=int)
parser.add_argument("--peak-caller", default="macs2", choices=["macs2", "spp"],
dest="peak_caller", help="Peak caller to use. Default=macs2.", type=str)
parser.add_argument("--peak-window-width", default=2000,
dest="peak_window_width",
help="Width of window around peak motifs. Default=2000.",
type=int)
return parser
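# Illustrative command lines (assumption: the project name and CSV file below
# are made up, and the console entry point is assumed to be installed under the
# prog name "pipelines" used above).  The two positional arguments are the
# project name and the sample annotation sheet; everything else falls back to
# the defaults defined in add_args(), e.g.:
#   pipelines my_project samples.csv --trimmer skewer -c 8 -q shortq
#   pipelines my_project samples.csv --stats
#   pipelines my_project samples.csv --dry-run --no-checks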
def sample_loop(args, prj):
"""
Loop through all samples and submit jobs to the pipeline under Slurm.
:param args: Parsed ArgumentParser object.
:type args: argparse.ArgumentParser
:param prj: `Project` object.
:type prj: pipelines.Project
"""
print("Starting sample preprocessing into jobs.")
# start pipeline
run_name = "_".join([prj.name, time.strftime("%Y%m%d-%H%M%S")])
# add track headers to track hubs
for genome in pd.Series([s.genome for s in prj.samples]).unique():
if not os.path.exists(os.path.join(prj.dirs.html, "trackHub_{0}.txt".format(genome))):
with open(os.path.join(prj.dirs.html, "trackHub_{0}.txt".format(genome)), "w") as handle:
handle.write("browser position {0}\n".format(prj.config["defaultposition"]))
# Loop through samples, submit to corresponding job (preprocess, analyse)
for sample in prj.samples:
# get job_name
job_name = "_".join([run_name, sample.name])
# if unmappedBam is a list, add final "unmapped" attr to sample object
if type(sample.unmappedBam) is list:
sample.unmapped = os.path.join(sample.dirs.unmapped, sample.name + ".bam")
# assemble command
# slurm header
job_code = tk.slurmHeader(
jobName=job_name,
output=os.path.join(prj.dirs.logs, job_name + ".slurm.log"),
queue=args.queue,
time=args.time,
cpusPerTask=args.cpus,
memPerCpu=args.mem,
userMail=args.user_mail
)
sample_pickle = os.path.join(prj.dirs.pickles, job_name + ".pickle")
# self reference the pickle file in its sample
sample.pickle = sample_pickle
# If sample has control attribute, get that sample and pair them
if hasattr(sample, "controlname"):
if type(sample.controlname) == str:
# Assign the sample with that name to ctrl
ctrl = [s for s in prj.samples if s.name == sample.controlname]
# if there is only one record, use that as control
if len(ctrl) == 1:
sample.ctrl = ctrl[0]
else:
# if not, process sample anyway, but without a matched control
print("Provided control sample name does not exist or is ambiguous: %s" % sample.controlname)
# save pickle with all objects (this time, 2nd element is a tuple!)
pickle.dump((prj, sample, args), open(sample_pickle, "wb"))
# Actual call to pipeline
technique = sample.technique.upper()
if technique in prj.config["techniques"]["chipseq"]:
job_code += "chipseq-pipeline {0}\n".format(sample_pickle)
elif technique in prj.config["techniques"]["cm"]:
job_code += "chipseq-pipeline {0}\n".format(sample_pickle)
elif technique in prj.config["techniques"]["atacseq"]:
job_code += "atacseq-pipeline {0}\n".format(sample_pickle)
elif technique in prj.config["techniques"]["dnase"]:
job_code += "atacseq-pipeline {0}\n".format(sample_pickle)
elif technique in prj.config["techniques"]["quantseq"]:
job_code += "quantseq-pipeline {0}\n".format(sample_pickle)
elif technique in prj.config["techniques"]["chemseq"]:
job_code += "chipseq-pipeline {0}\n".format(sample_pickle)
else:
raise TypeError("Sample is not in known sample class.")
# Slurm footer
job_code += tk.slurmFooter()
# Save code as executable
job_file = os.path.join(prj.dirs.executables, job_name + ".sh")
with open(job_file, 'w') as handle:
handle.write(textwrap.dedent(job_code))
# Submit to slurm
if not args.dry_run:
status = tk.slurmSubmitJob(job_file)
if status != 0:
print("Could not submit job '%s' to slurm." % job_file)
sys.exit(1)
print("Submitted job to slurm: '%s'" % job_name)
# Create link to trackHub in project folder
tk.linkToTrackHub(
trackHubURL=os.path.join(prj.dirs.html, "trackHub_{0}.txt".format(sample.genome)),
fileName=os.path.join(prj.dirs.root, "ucsc_tracks_{0}.html".format(sample.genome)),
genome=sample.genome
)
# write original annotation sheet to project folder
# add field for manual sample-control pairing
    prj.sheet.df["controlname"] = None  # bracket assignment so a new column is actually added
prj.sheet.to_csv(os.path.join(prj.dirs.root, prj.name + ".annotation_sheet.csv"))
print("Finished preprocessing")
def read_stats(prj):
"""
Given an annotation sheet with replicates, gets number of reads mapped, duplicates, etc...
:param prj: `Project` object.
:type prj: pipelines.Project
"""
print("Starting sample read stats.")
bowtie_cols = ["readCount", "unpaired", "unaligned", "unique", "multiple", "alignmentRate"]
samples = pd.DataFrame(index=["name"] + bowtie_cols + ["single-ends", "paired-ends", "duplicates", "NSC", "RSC", "qualityTag", "peakNumber", "FRiP"])
for sample in prj.samples:
sample = sample.asSeries()
# Get alignment stats
try:
sample = sample.append(parse_bowtie_stats(sample.alnRates))
except:
print("Record with alignment rates is empty or not found for sample %s" % sample.name)
# Get duplicate stats
try:
sample = sample.append(parse_duplicate_stats(sample.dupsMetrics))
except:
print("Record with duplicates is empty or not found for sample %s" % sample.name)
# Get NSC and RSC
try:
sample = sample.append(parse_qc(sample.qc))
except:
print("Record with quality control is empty or not found for sample %s" % sample.name)
# Count peak number (if peaks exist)
if hasattr(sample, "peaks"):
# and if sample has peaks
if str(sample.peaks) != "nan":
                # if the peak file exists
if os.path.exists(sample.peaks):
sample = get_peak_number(sample)
# Get FRiP from file (if exists) and add to sheet
if hasattr(sample, "peaks"):
# if sample has peaks
if str(sample.peaks) != "nan":
try:
sample = get_frip(sample)
except:
print("Record with FRiP value is empty or not found for sample %s" % sample.name)
samples[sample["name"]] = sample
# write annotation sheet with statistics
samples.T.to_csv(prj.sampleStats, index=False)
print("Finished getting read statistics.")
def compare():
raise NotImplementedError
def parse_bowtie_stats(stats_file):
"""
Parses Bowtie2 stats file, returns series with values.
:param stats_file: Bowtie2 output file with alignment statistics.
:type stats_file: str
"""
import re
stats = pd.Series(index=["readCount", "unpaired", "unaligned", "unique", "multiple", "alignmentRate"])
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return stats
# total reads
try:
line = [i for i in range(len(content)) if " reads; of these:" in content[i]][0]
stats["readCount"] = re.sub("\D.*", "", content[line])
if 7 > len(content) > 2:
line = [i for i in range(len(content)) if "were unpaired; of these:" in content[i]][0]
stats["unpaired"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
else:
line = [i for i in range(len(content)) if "were paired; of these:" in content[i]][0]
stats["unpaired"] = stats["readCount"] - int(re.sub("\D", "", re.sub("\(.*", "", content[line])))
line = [i for i in range(len(content)) if "aligned 0 times" in content[i]][0]
stats["unaligned"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned exactly 1 time" in content[i]][0]
stats["unique"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned >1 times" in content[i]][0]
stats["multiple"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "overall alignment rate" in content[i]][0]
stats["alignmentRate"] = re.sub("\%.*", "", content[line]).strip()
except IndexError:
pass
return stats
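# --- Illustrative sketch (not part of the original pipeline) ---
# parse_bowtie_stats() expects a single-end style Bowtie2 log; the snippet in
# the comments below shows the kind of lines it looks for, and the file name is
# hypothetical.  The function is never called at import time.
def _example_parse_bowtie_stats():
    # A matching Bowtie2 log would contain lines such as:
    #   10000 reads; of these:
    #     10000 (100.00%) were unpaired; of these:
    #       500 (5.00%) aligned 0 times
    #       9000 (90.00%) aligned exactly 1 time
    #       500 (5.00%) aligned >1 times
    #   95.00% overall alignment rate
    stats = parse_bowtie_stats("sample1.alnRates.txt")  # hypothetical path
    # A missing or malformed log simply yields a Series of NaNs.
    return stats["readCount"], stats["alignmentRate"]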
def parse_duplicate_stats(stats_file):
"""
Parses Bowtie2 stats file, returns series with values.
:param stats_file: Bowtie2 output file with alignment statistics.
:type stats_file: str
"""
import re
series = pd.Series()
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return series
try:
line = [i for i in range(len(content)) if "single ends (among them " in content[i]][0]
series["single-ends"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if " end pairs... done in " in content[i]][0]
series["paired-ends"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line]))
line = [i for i in range(len(content)) if " duplicates, sorting the list... done in " in content[i]][0]
series["duplicates"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line]))
except IndexError:
pass
return series
def parse_qc(qc_file):
"""
Parses QC table produced by phantompeakqualtools (spp) and returns sample quality metrics.
:param qc_file: phantompeakqualtools output file sample quality measurements.
:type qc_file: str
"""
series = pd.Series()
try:
with open(qc_file) as handle:
line = handle.readlines()[0].strip().split("\t") # list of strings per line
series["NSC"] = line[-3]
series["RSC"] = line[-2]
series["qualityTag"] = line[-1]
except:
pass
return series
def get_peak_number(sample):
"""
Counts number of peaks from a sample's peak file.
:param sample: A Sample object with the "peaks" attribute.
    :type sample: pipelines.Sample
"""
import subprocess
import re
proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE)
out, err = proc.communicate()
sample["peakNumber"] = re.sub("\D.*", "", out)
return sample
def get_frip(sample):
"""
Calculates the fraction of reads in peaks for a given sample.
    :param sample: A Sample object (as a Series) with the "frip", "readCount" and "unaligned" fields.
    :type sample: pipelines.Sample
"""
import re
with open(sample.frip, "r") as handle:
content = handle.readlines()
    reads_in_peaks = float(re.sub("\D", "", content[0]))
    # readCount and unaligned are parsed as text upstream; cast before arithmetic
    mapped_reads = float(sample["readCount"]) - float(sample["unaligned"])
    sample["FRiP"] = reads_in_peaks / mapped_reads
    return sample
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Program canceled by user!")
sys.exit(1)
| gpl-2.0 |
bcspragu/Machine-Learning-Projects | MovieLens/Code/data2.py | 1 | 3956 | import numpy as np
import matplotlib.pyplot as plt
res = eval("""[ 1, 1, 2, 0, 1, 2, 0, 0, 0, 0, 1, 0, 1, 0, 2, 0,
2, 1, 0, 2, 1, 1, 1, 0, 0, 2, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,
0, 0, 0, 2, 1, 0, 0, 1, 0, 0, 1, 1, 2, 1, 1, 0, 1, 0, 1, 0,
0, 1, 0, 0, 2, 1, 2, 1, 0, 1, 0, 2, 1, 1, 2, 0, 1, 0, 1, 2,
1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0,
0, 2, 0, 1, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 2, 0, 0, 1, 1, 2,
0, 0, 0, 2, 2, 0, 2, 1, 0, 0, 0, 1, 2, 0, 1, 2, 2, 0, 1, 0,
0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 2, 0, 0, 0, 2, 1, 2, 2,
0, 0, 0, 1, 2, 1, 2, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1,
1, 0, 1, 1, 1, 1, 2, 1, 0, 0, 1, 0, 0, 1, 1, 1, 2, 2, 1, 0,
0, 2, 2, 0, 2, 2, 1, 2, 1, 2, 2, 0, 2, 0, 2, 1, 0, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 2, 2, 0, 0, 1, 0, 2, 1, 2,
0, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 2, 2, 0, 1, 0, 2, 0, 2, 1,
1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 2, 0, 0, 0,
1, 0, 1, 0, 0, 1, 1, 1, 2, 2, 1, 1, 0, 1, 0, 0, 1, 0, 1, 2,
0, 0, 0, 0, 2, 0, 1, 0, 2, 2, 2, 1, 2, 0, 1, 0, 0, 1, 0, 2,
0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 2, 1, 0, 1,
0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1,
0, 1, 0, 2, 0, 2, 0, 0, 2, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 0, 0, 1, 2, 1, 1, 1, 1, 2, 1, 0, 1, 0, 0, 0,
1, 2, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 2, 0, 0, 1, 0, 1, 0,
2, 0, 0, 0, 0, 1, 1, 0, 2, 2, 1, 2, 1, 0, 1, 1, 1, 2, 1, 1,
0, 1, 0, 2, 2, 0, 2, 0, 2, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1,
0, 1, 1, 2, 0, 0, 2, 0, 1, 2, 0, 2, 0, 2, 1, 2, 0, 1, 0, 1,
1, 2, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 0, 1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0,
2, 1, 0, 0, 0, 1, 1, 2, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 1, 1, 2, 1, 0, 1, 0, 1, 1, 2, 1, 2, 2, 2, 2, 0, 2, 0,
0, 2, 1, 0, 2, 1, 0, 1, 0, 1, 1, 0, 0, 2, 1, 1, 0, 2, 1, 0,
0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 1, 2, 2, 2, 0, 0, 1, 1, 1, 0,
2, 2, 1, 0, 0, 1, 1, 0, 1, 2, 1, 0, 0, 1, 1, 1, 2, 0, 1, 0,
2, 1, 2, 0, 1, 0, 1, 0, 1, 2, 0, 1, 0, 2, 2, 1, 2, 0, 2, 2,
2, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 0, 2, 0, 0, 1, 0, 0, 2, 0,
0, 1, 1, 0, 0, 1, 2, 1, 2, 0, 2, 0, 0, 1, 0, 2, 2, 0, 1, 1,
0, 2, 1, 2, 0, 2, 0, 2, 0, 0, 1, 1, 0, 1, 1, 0, 2, 0, 1, 0,
0, 0, 2, 0, 0, 0, 2, 2, 0, 2, 1, 0, 1, 1, 2, 0, 2, 2, 2, 2,
1, 1, 0, 1, 1, 2, 1, 1, 2, 1, 0, 1, 0, 1, 0, 1, 2, 1, 1, 2,
1, 0, 0, 0, 1, 2, 1, 0, 2, 1, 0, 2, 1, 0, 0, 0, 2, 2, 1, 1,
0, 2, 0, 0, 1, 2, 0, 0, 1, 0, 2, 1, 1, 1, 0, 1, 0, 0, 2, 0,
2, 0, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0,
0, 1, 0, 1, 0, 2, 0, 2, 0, 0, 1, 2, 1, 1, 2, 1, 2, 0, 0, 1,
2, 0, 1, 0, 0, 0, 2, 1, 2, 0, 2, 0, 0, 0, 0, 0, 2, 2, 2, 1,
1, 2, 0, 1, 0, 0, 1, 0, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 1,
0, 2, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 2,
0, 1, 1, 2, 0, 2, 0, 0, 1, 1, 0, 2, 0, 2, 1, 2, 2, 2, 2, 1,
1, 2, 1, 2, 1, 1, 0, 1, 2, 1, 0, 0, 2, 2, 0, 0, 2, 0, 0, 0,
2, 1, 0, 2, 0, 0, 1]""")
plt.xlabel("Cluster ID")
plt.ylabel("Number of Data Cases")
plt.title("Data Cases per Cluster")
plt.hist(res, bins=3)
plt.xticks(range(3),[0,1,2])
plt.show()
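# --- Alternative sketch (assumption: not part of the original script) ---
# With integer cluster IDs, plt.hist(res, bins=3) places bin edges at
# 0, 2/3, 4/3 and 2, so the outer ticks at 0 and 2 sit on bin edges rather
# than bar centres.  Counting with np.bincount and drawing a bar chart keeps
# the ticks aligned, e.g. _cluster_bar_chart(res).
def _cluster_bar_chart(labels):
    counts = np.bincount(labels, minlength=3)
    plt.bar(range(3), counts, width=0.8, align='center')
    plt.xticks(range(3), [0, 1, 2])
    plt.xlabel("Cluster ID")
    plt.ylabel("Number of Data Cases")
    plt.show()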
| mit |
potash/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
DamienIrving/ocean-analysis | visualisation/drift_paper/plot_conservation_scatter.py | 1 | 18441 | """
Filename: plot_conservation_scatter.py
Author: Damien Irving, [email protected]
Description: Create a scatterplot showing energy, mass and salt conservation
"""
# Import general Python modules
import sys
import os
import re
import pdb
import argparse
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.gridspec import GridSpec
from brokenaxes import brokenaxes
import cmdline_provenance as cmdprov
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
import matplotlib as mpl
mpl.rcParams['axes.labelsize'] = 'xx-large'
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['legend.fontsize'] = 'xx-large'
# From https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
# '#800000' Maroon
# '#a9a9a9' Grey
# '#808000' Olive
# '#469990' Teal
# '#000075' Navy
# '#e6194B' Red
# '#f58231' Orange
# '#fabebe' Pink
# '#ffe119' Yellow
# '#bfef45' Lime
# '#3cb44b' Green
# '#42d4f4' Cyan
# '#4363d8' Blue
# '#911eb4' Purple
# '#f032e6' Magenta
institution_colors = {'BCC': '#800000',
'BNU': '#a9a9a9',
'CMCC': '#808000',
'CNRM-CERFACS': '#469990',
'CSIRO': '#000075',
'E3SM-Project': '#e6194B',
'EC-Earth-Consortium': '#f58231',
'HAMMOZ-Consortium': '#fabebe',
'IPSL': '#ffe119',
'MIROC': '#bfef45',
'MOHC': '#3cb44b',
'MPI-M': '#42d4f4',
'NASA-GISS': '#4363d8',
'NCC': '#911eb4',
'NOAA-GFDL': '#f032e6'
}
markers = ['o', '^', 's', '<', '>', 'v', 'p', 'D', 'd', 'h', 'H', 'X']
axis_labels = {'thermal OHC': 'change in OHC temperature component \n $dH_T/dt$',
'masso': 'change in ocean mass \n $dM/dt$',
'massa': 'change in mass of atmospheric water vapor \n $dM_a/dt$',
'netTOA': 'time-integrated netTOA \n $dQ_r/dt$',
'hfdsgeou': 'time-integrated heat flux into ocean \n $dQ_h/dt$',
'soga': 'change in ocean salinity \n $dS/dt$',
'wfo': 'time-integrated freshwater flux into ocean \n $dQ_m/dt$',
'wfa': 'time-integrated moisture flux into atmosphere \n $dQ_{ep}/dt$'}
stats_done = []
quartiles = []
cmip6_data_points = {}
cmip5_data_points = {}
# Define functions
def record_quartiles(variable, data, project):
"""Get the ensemble quartiles"""
quartiles.append('# ' + variable + ' quartiles')
abs_data = np.abs(data)
clean_abs_data = abs_data[np.logical_not(np.isnan(abs_data))]
upper_quartile = np.percentile(clean_abs_data, 75)
median = np.percentile(clean_abs_data, 50)
lower_quartile = np.percentile(clean_abs_data, 25)
nmodels = len(data)
valid_nmodels = len(clean_abs_data)
upper_quartile_text = "%s upper quartile: %f" %(project, upper_quartile)
median_text = "%s median: %f" %(project, median)
lower_quartile_text = "%s lower quartile: %f" %(project, lower_quartile)
nmodels_text = "%s number of models: %i (%i not nan)" %(project, nmodels, valid_nmodels)
quartiles.append(upper_quartile_text)
quartiles.append(median_text)
quartiles.append(lower_quartile_text)
quartiles.append(nmodels_text)
def plot_abline(ax, slope, intercept, static_bounds=True):
"""Plot a line from slope and intercept"""
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if type(xlim[0]) in (list, tuple):
for lims in xlim:
x_vals = np.array(lims)
y_vals = intercept + slope * x_vals
ax.plot(x_vals, y_vals, linestyle='--', c='0.5')
else:
x_vals = np.array(xlim)
y_vals = intercept + slope * x_vals
ax.plot(x_vals, y_vals, linestyle='--', c='0.5')
if static_bounds:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
def plot_shading(ax):
"""Plot shading to indicate dominant source of drift."""
xlim = ax.get_xlim()
ylim = ax.get_ylim()
x_vals = np.array(xlim)
y_vals = x_vals * 2
ax.fill_between(x_vals, 0, y_vals, alpha=0.3, color='0.5')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
def plot_eei_shading(ax):
"""Plot shading to indicate netTOA / OHC valid range."""
xlim = ax.get_xlim()
ylim = ax.get_ylim()
x_vals = np.array(xlim)
y_vals = x_vals * 0.8
ax.fill_between(x_vals, x_vals, y_vals, alpha=0.3, color='0.5')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
def format_axis_label(orig_label, units, scale_factor):
"""Put LaTeX math into axis labels"""
label = orig_label.split('(')[0] + '(' + units + ')'
label = label.replace('s-1', 's$^{-1}$')
label = label.replace('m-2', 'm$^{-2}$')
label = label.replace('yr-1', 'yr$^{-1}$')
if scale_factor:
scale_factor = int(scale_factor) * -1
label = label.replace('(', '(10$^{%s}$ ' %(str(scale_factor)))
for var in axis_labels.keys():
if var in label:
label = label.replace(var, axis_labels[var])
return label
def plot_two_var_aesthetics(ax, yvar, xvar, units, scinotation, shading, scale_factor,
xpad=None, ypad=None, non_square=True):
"""Set the plot aesthetics"""
plot_abline(ax, 1, 0, static_bounds=non_square)
ax.axhline(y=0, color='black', linewidth=1.0)
ax.axvline(x=0, color='black', linewidth=1.0)
ylabel = format_axis_label(yvar, units, scale_factor)
if ypad:
ax.set_ylabel(ylabel, labelpad=ypad)
else:
ax.set_ylabel(ylabel)
xlabel = format_axis_label(xvar, units, scale_factor)
    if xpad:
        ax.set_xlabel(xlabel, labelpad=xpad)
    else:
        ax.set_xlabel(xlabel)
#plt.sca(ax)
if scinotation:
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0), useMathText=True)
if 'W m$^{-2}$' in ylabel:
ax.axhspan(0.4, 1.0, color='0.95', zorder=1)
ax.axhspan(-0.4, -1.0, color='0.95', zorder=1)
ax.axvspan(0.4, 1.0, color='0.95', zorder=1)
ax.axvspan(-0.4, -1.0, color='0.95', zorder=1)
# ax.axhline(y=-0.5, color='0.5', linewidth=0.5, linestyle='--')
# ax.axhline(y=0.5, color='0.5', linewidth=0.5, linestyle='--')
# ax.axvline(x=-0.5, color='0.5', linewidth=0.5, linestyle='--')
# ax.axvline(x=0.5, color='0.5', linewidth=0.5, linestyle='--')
elif 'kg yr-1' in xvar:
ref = convert_units(1.8, 'mm yr-1', 'kg yr-1')
ref = ref * 10**scale_factor
ax.axhline(y=-1 * ref, color='0.5', linewidth=0.5, linestyle='--')
ax.axhline(y=ref, color='0.5', linewidth=0.5, linestyle='--')
ax.axvline(x=-1 * ref, color='0.5', linewidth=0.5, linestyle='--')
ax.axvline(x=ref, color='0.5', linewidth=0.5, linestyle='--')
return xlabel, ylabel
def plot_one_var_aesthetics(ax, yvar, units, scinotation, scale_factor, ypad=None):
"""Set the plot aesthetics"""
ax.axhline(y=0, color='black', linewidth=1.0)
ylabel = format_axis_label(yvar, units, scale_factor)
if ypad:
ax.set_ylabel(ylabel, labelpad=ypad)
else:
ax.set_ylabel(ylabel)
if scinotation:
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
ax.set_yscale('symlog')
ax.grid(axis='y')
#ax.get_xaxis().set_visible(False)
ax.get_xaxis().set_ticks([])
ax.set_xlabel('CMIP5/CMIP6 model')
ax.axhline(y=-1.68e13 * 10**scale_factor, color='0.5', linewidth=0.5, linestyle='--')
ax.axhline(y=1.68e13 * 10**scale_factor, color='0.5', linewidth=0.5, linestyle='--')
return ylabel
def get_units(column_header):
"""Get the units from the column header."""
units = column_header.split('(')[-1].split(')')[0]
return units
def convert_units(value, start_units, end_units):
"""Convert units."""
sec_in_year = 365.25 * 24 * 60 * 60
ocean_density = 1026 # kg m-3
ocean_area = 3.6e14 #m2
if start_units == end_units:
new_value = value
else:
assert start_units in ['J yr-1', 'm yr-1', 'kg yr-1', 'g/kg yr-1', 'm yr-1', 'mm yr-1']
assert end_units in ['PW', 'W m-2', 'mm yr-1', 'kg s-1', 'g/kg s-1', 'm s-1', 'kg yr-1']
if start_units == 'J yr-1':
new_value = value / sec_in_year
if end_units == 'W m-2':
earth_surface_area = 5.1e14
new_value = new_value / earth_surface_area
elif end_units == 'PW':
new_value = new_value / 1e15
elif (start_units == 'm yr-1') and (end_units == 'mm yr-1'):
new_value = value * 1000
elif (start_units == 'kg yr-1') and (end_units == 'mm yr-1'):
volume_trend = value / ocean_density
new_value = (volume_trend / ocean_area) * 1000
elif (start_units == 'mm yr-1') and (end_units == 'kg yr-1'):
new_value = (value / 1000) * ocean_area * ocean_density
elif (start_units == 'kg yr-1') and (end_units == 'kg s-1'):
new_value = value / sec_in_year
elif (start_units == 'g/kg yr-1') and (end_units == 'g/kg s-1'):
new_value = value / sec_in_year
elif (start_units == 'm yr-1') and (end_units == 'm s-1'):
new_value = value / sec_in_year
return new_value
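# --- Worked example (sketch only; never called) ---
# Converting a sea-level-equivalent trend of 1.8 mm yr-1 into an ocean mass
# trend with the constants used in convert_units() above
# (density 1026 kg m-3, ocean area 3.6e14 m2):
#   1.8 mm yr-1 = 1.8e-3 m yr-1
#   1.8e-3 * 3.6e14 * 1026 ~ 6.6e14 kg yr-1
def _example_convert_units():
    mass_trend = convert_units(1.8, 'mm yr-1', 'kg yr-1')        # ~6.6e14 kg yr-1
    rate_back = convert_units(mass_trend, 'kg yr-1', 'mm yr-1')  # ~1.8 mm yr-1
    return mass_trend, rate_back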
def plot_broken_comparison(ax, df, title, xvar, yvar, plot_units,
scale_factor=0, scinotation=False, shading=False,
xpad=None, ypad=None, broken=False, legend=False):
"""Plot comparison for given x and y variables.
Data are multiplied by 10^scale_factor.
"""
cmip5_institution_counts = {}
for institution in institution_colors.keys():
cmip5_institution_counts[institution] = 0
cmip6_institution_counts = cmip5_institution_counts.copy()
cmip6_xdata = []
cmip6_ydata = []
cmip5_xdata = []
cmip5_ydata = []
x_input_units = get_units(xvar)
y_input_units = get_units(yvar)
for dotnum in range(len(df['model'])):
x = convert_units(df[xvar][dotnum], x_input_units, plot_units) * 10**scale_factor
y = convert_units(df[yvar][dotnum], y_input_units, plot_units) * 10**scale_factor
label = df['model'][dotnum]
#label = df['model'][dotnum] + ' (' + df['run'][dotnum] + ')'
institution = df['institution'][dotnum]
color = institution_colors[institution]
if df['project'][dotnum] == 'cmip6':
facecolors = color
edgecolors ='none'
marker_num = cmip6_institution_counts[institution]
cmip6_institution_counts[institution] = cmip6_institution_counts[institution] + 1
cmip6_xdata.append(x)
cmip6_ydata.append(y)
else:
facecolors = 'none'
edgecolors = color
marker_num = cmip5_institution_counts[institution]
cmip5_institution_counts[institution] = cmip5_institution_counts[institution] + 1
cmip5_xdata.append(x)
cmip5_ydata.append(y)
marker = markers[marker_num]
x_for_plot = dotnum + 1 if 'massa' in xvar else x
ax.scatter(x_for_plot, y, label=label, s=130, linewidth=1.2, marker=marker,
facecolors=facecolors, edgecolors=edgecolors, zorder=2)
if dotnum == 0:
xmin = x
xmax = x
ymin = y
ymax = y
else:
xmin = min(x, xmin)
xmax = max(x, xmax)
ymin = min(y, ymin)
ymax = max(y, ymax)
print(title)
print(f'x-axis: {xmin} to {xmax}')
print(f'y-axis: {ymin} to {ymax}')
if broken:
non_square = False
else:
non_square = True
if not 'massa' in xvar:
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_title(title)
if 'massa' in xvar:
ylabel = plot_one_var_aesthetics(ax, yvar, plot_units, scinotation, scale_factor, ypad=ypad)
xlabel = format_axis_label(xvar, plot_units, scale_factor)
else:
xlabel, ylabel = plot_two_var_aesthetics(ax, yvar, xvar, plot_units, scinotation, shading, scale_factor,
xpad=xpad, ypad=ypad, non_square=non_square)
if not xlabel in stats_done:
cmip6_data_points[xlabel] = cmip6_xdata
cmip5_data_points[xlabel] = cmip5_xdata
if not ylabel in stats_done:
cmip6_data_points[ylabel] = cmip6_ydata
cmip5_data_points[ylabel] = cmip5_ydata
stats_done.append(xlabel)
stats_done.append(ylabel)
def get_legend_info(ax, df_subset):
"""Get the legend handles and labels.
df_subset should only contain rows plotted in ax
"""
legend_info = ax.get_legend_handles_labels()
if len(legend_info[0]) == 2:
legend_info = legend_info[0]
assert len(legend_info) == 2
handles = legend_info[0]
labels = legend_info[1]
for index, model in enumerate(labels):
if df_subset.loc[model].isnull().values.any():
handles[index] = None
return handles, labels
def update_legend_info(ax, df_subset, handles, labels):
"""Update legend information.
df_subset should only contain rows plotted in ax
"""
new_handles, new_labels = get_legend_info(ax, df_subset)
assert len(handles) == len(new_handles)
for index, handle in enumerate(handles):
if not handle:
handles[index] = new_handles[index]
return handles, labels
def main(inargs):
"""Run the program."""
df = pd.read_csv(inargs.infile)
#df.set_index(df['model'] + ' (' + df['run'] + ')', drop=True, inplace=True)
df.set_index(df['model'], drop=True, inplace=True)
fig = plt.figure(figsize=[18.5, 21]) # width, height
gs = GridSpec(3, 2)
# EEI conservation
eei_ax = fig.add_subplot(gs[0, 0])
plot_broken_comparison(eei_ax, df, '(a) planetary energy imbalance', 'netTOA (J yr-1)',
'thermal OHC (J yr-1)', 'W m-2', legend=True)
handles, labels = get_legend_info(eei_ax, df[['netTOA (J yr-1)', 'thermal OHC (J yr-1)']])
# Ocean energy conservation
xlims=[(-41.05, -40.82), (-0.55, 0.71)]
ylims=[(-0.55, 0.66)]
wspace = hspace = 0.08
ocean_energy_ax = brokenaxes(xlims=xlims, ylims=ylims, hspace=hspace, wspace=wspace,
subplot_spec=gs[0, 1], d=0.0)
#ocean_energy_ax = fig.add_subplot(gs[0, 1])
plot_broken_comparison(ocean_energy_ax, df, '(b) ocean energy conservation', 'hfdsgeou (J yr-1)',
'thermal OHC (J yr-1)', 'W m-2', xpad=25, ypad=45, broken=True)
handles, labels = update_legend_info(ocean_energy_ax, df[['hfdsgeou (J yr-1)', 'thermal OHC (J yr-1)']],
handles, labels)
# Ocean mass conservation
xlims=[(-7, 4), (472, 474), (492, 495)]
ylims=[(-0.7, 0.25)]
hspace = 0.1
ocean_mass_ax = brokenaxes(xlims=xlims, ylims=ylims, hspace=hspace, subplot_spec=gs[1, 0], d=0.0)
#ocean_mass_ax = fig.add_subplot(gs[1, 0])
plot_broken_comparison(ocean_mass_ax, df, '(c) ocean mass conservation', 'wfo (kg yr-1)', 'masso (kg yr-1)',
'kg yr-1', scale_factor=-15, broken=True, xpad=30, ypad=50)
handles, labels = update_legend_info(ocean_mass_ax, df[['wfo (kg yr-1)', 'masso (kg yr-1)']],
handles, labels)
# Salt conservation
xlims=[(-0.73, 0.35), (3.55, 3.7)]
ylims=[(-0.8, 3.1)]
hspace = wspace = 0.1
salt_ax = brokenaxes(xlims=xlims, ylims=ylims, hspace=hspace, wspace=wspace, subplot_spec=gs[1, 1], d=0.0)
#salt_ax = fig.add_subplot(gs[1, 1])
plot_broken_comparison(salt_ax, df, '(d) salt conservation', 'masso (kg yr-1)', 'soga (kg yr-1)',
'kg yr-1', scale_factor=-15, xpad=30, ypad=40, broken=True)
handles, labels = update_legend_info(salt_ax, df[['masso (kg yr-1)', 'soga (kg yr-1)']],
handles, labels)
# Atmosphere mass conservation
atmos_mass_ax = fig.add_subplot(gs[2, :])
plot_broken_comparison(atmos_mass_ax, df, '(e) atmospheric mass conservation', 'massa (kg yr-1)', 'wfa (kg yr-1)',
'kg yr-1', scale_factor=-12, ypad=20)
handles, labels = update_legend_info(atmos_mass_ax, df[['wfa (kg yr-1)']], handles, labels)
fig.legend(handles, labels, loc='center left', bbox_to_anchor=(0.815, 0.5))
plt.tight_layout(rect=(0, 0, 0.8, 1))
for variable, data in cmip6_data_points.items():
record_quartiles(variable, data, 'cmip6')
for variable, data in cmip5_data_points.items():
record_quartiles(variable, data, 'cmip5')
plt.savefig(inargs.outfile, dpi=400)
log_file = re.sub('.png', '.met', inargs.outfile)
log_text = cmdprov.new_log(git_repo=repo_dir, extra_notes=quartiles)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, [email protected]
"""
description = 'Create a scatterplot showing energy, mass and salt conservation'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infile", type=str, help="Input file name")
parser.add_argument("outfile", type=str, help="Output file name")
args = parser.parse_args()
main(args)
| mit |
ilastikdev/opengm | src/interfaces/python/examples/mrf/denoise.py | 6 | 6709 | # FIXMEEEEEEEEEEE
import opengm
import vigra # only to read images
import numpy
#import sys
# to animate the current labeling matplotlib is used
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
class PyCallback(object):
"""
callback functor which will be passed to an inference
visitor.
In that way, pure python code can be injected into the c++ inference.
This functor visualizes the labeling as an image during inference.
Args :
shape : shape of the image
numLabels : number of labels
"""
def __init__(self,shape,numLabels):
self.shape=shape
self.numLabels=numLabels
matplotlib.interactive(True)
def begin(self,inference):
"""
this function is called from c++ when inference is started
Args :
inference : python wrapped c++ solver which is passed from c++
"""
print "begin"
def end(self,inference):
"""
this function is called from c++ when inference ends
Args :
inference : python wrapped c++ solver which is passed from c++
"""
print "end"
def visit(self,inference):
"""
this function is called from c++ each time the visitor is called
Args :
inference : python wrapped c++ solver which is passed from c++
"""
arg = inference.arg()
gm = inference.gm()
print "energy ",gm.evaluate(arg)
arg=arg.reshape(self.shape)*255
plt.imshow(arg.T, cmap='gray',interpolation="nearest")
plt.draw()
def denoiseModel(
img,
norm = 2,
weight = 1.0,
truncate = None,
numLabels = 256,
neighbourhood = 4,
inpaintPixels = None,
randInpaitStartingPoint = False
):
"""
this function is used to set up a graphical model similar to
    **Denoising and inpainting problems:** from the `MRF Benchmark <http://vision.middlebury.edu/MRF/results/>`_
Args :
img : a grayscale image in the range [0,256)
norm : used norm for unaries and 2-order functions (default : 2)
weight : weight of 2-order functions (default : 1.0)
        truncate : Truncate the second order function at a given value (default : None)
        numLabels : number of labels for each variable in the graphical model,
                    set this to a lower number to speed up inference (default : 256)
neighbourhood : neighbourhood for the second order functions, so far only 4 is allowed (default : 4)
inpaintPixels : a tuple of x and y coordinates where no unaries are added
randInpaitStartingPoint : use a random starting point for all pixels without unaries (default : False)
"""
shape = img.shape
if(img.ndim!=2):
raise RuntimeError("image must be gray")
if neighbourhood != 4 :
raise RuntimeError("A neighbourhood other than 4 is not yet implemented")
# normalize and flatten image
iMin = numpy.min(img)
iMax = numpy.max(img)
imgNorm = ((img[:,:]-iMin)/(iMax-iMin))*float(numLabels)
imgFlat = imgNorm.reshape(-1).astype(numpy.uint64)
# Set up Grapical Model:
numVar = int(img.size)
gm = opengm.gm([numLabels]*numVar,operator='adder')
gm.reserveFunctions(numLabels,'explicit')
numberOfPairwiseFactors=shape[0]*(shape[1]-1) + shape[1]*(shape[0]-1)
gm.reserveFactors(numVar-len(inpaintPixels[0]) + numberOfPairwiseFactors )
# Set up unaries:
# - create a range of all possible labels
allPossiblePixelValues=numpy.arange(numLabels)
pixelValueRep = numpy.repeat(allPossiblePixelValues[:,numpy.newaxis],numLabels,1)
    # - repeat [0,1,2,...,numLabels-1] numLabels times (one row per possible pixel value)
labelRange = numpy.arange(numLabels,dtype=opengm.value_type)
labelRange = numpy.repeat(labelRange[numpy.newaxis,:], numLabels, 0)
unaries = numpy.abs(pixelValueRep - labelRange)**norm
# - add unaries to the graphical model
fids=gm.addFunctions(unaries.astype(opengm.value_type))
# add unary factors to graphical model
if(inpaintPixels is None):
for l in xrange(numLabels):
whereL=numpy.where(imgFlat==l)
gm.addFactors(fids[l],whereL[0].astype(opengm.index_type))
else:
# get vis of inpaint pixels
ipX = inpaintPixels[0]
ipY = inpaintPixels[1]
ipVi = ipX*shape[1] + ipY
for l in xrange(numLabels):
whereL=numpy.where(imgFlat==l)
notInInpaint=numpy.setdiff1d(whereL[0],ipVi)
gm.addFactors(fids[l],notInInpaint.astype(opengm.index_type))
# add ONE second order function
f=opengm.differenceFunction(shape=[numLabels,numLabels],norm=2,weight=weight)
fid=gm.addFunction(f)
vis2Order=opengm.secondOrderGridVis(shape[0],shape[1],True)
# add all second order factors
gm.addFactors(fid,vis2Order)
# create a starting point
startingPoint = imgFlat.copy()
if randInpaitStartingPoint :
startingPointRandom = numpy.random.randint(0,numLabels,size=numVar).astype(opengm.index_type)
ipVi = inpaintPixels[0]*shape[1] + inpaintPixels[1]
for x in ipVi:
startingPoint[x]=startingPointRandom[x]
startingPoint[startingPoint==numLabels]=numLabels-1
return gm,startingPoint.astype(opengm.index_type)
if __name__ == "__main__":
# setup
imgPath = 'houseM-input.png'
norm = 2
weight = 5.0
numLabels = 50 # use 256 for full-model (slow)
# Read image
img = numpy.array(numpy.squeeze(vigra.impex.readImage(imgPath)),dtype=opengm.value_type)#[0:100,0:40]
shape = img.shape
# get graphical model an starting point
gm,startingPoint=denoiseModel(img,norm=norm,weight=weight,inpaintPixels=numpy.where(img==0),
numLabels=numLabels,randInpaitStartingPoint=True)
inf=opengm.inference.Imc(gm,parameter=opengm.InfParam())
print "inf"
inf.setStartingPoint(inf.arg())
# set up visitor
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
# get the result
arg=inf.arg()
arg=arg.reshape(shape)
# plot final result
matplotlib.interactive(False)
# Two subplots, the axes array is 1-d
f, axarr = plt.subplots(1,2)
axarr[0].imshow(img.T, cmap = cm.Greys_r)
axarr[0].set_title('Input Image')
axarr[1].imshow(arg.T, cmap = cm.Greys_r)
axarr[1].set_title('Solution')
plt.show() | mit |
jorge2703/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
mw10178/ctplot_iw | ctplot/plot.py | 2 | 44310 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, json, tables, ticks, time, logging
from tempfile import gettempdir
from os import path
from collections import OrderedDict, namedtuple
from itertools import chain
import numpy as np
import numpy.ma as ma
from scipy.optimize import curve_fit
import matplotlib as mpl
import matplotlib.pyplot as plt
from utils import get_args_from, isseq, set_defaults, number_mathformat, number_format, hashargs, noop
from itertools import product
from locket import lock_file
from i18n import _
from safeeval import safeeval
logging.basicConfig(level = logging.DEBUG, format = '%(filename)s:%(funcName)s:%(lineno)d:%(message)s')
log = logging.getLogger('plot')
# override eval by safe version
eval = safeeval()
TableSpecs = namedtuple('TableSpecs', ('title', 'colnames', 'units', 'rows'))
def available_tables(d = os.path.dirname(__file__) + '/data'):
files = []
dirlen = len(d)
for p, d, f in os.walk(d):
for ff in f:
files.append(path.join(p, ff))
files = map(lambda f:f.replace('\\', '/'), files)
files = filter(lambda f:f.lower().endswith('.h5'), files)
files.sort()
tabs = OrderedDict()
for f in files:
try:
h5 = tables.openFile(f, 'r')
for n in h5.walkNodes(classname = 'Table'):
tab = f[dirlen+1:] + ':' + n._v_pathname
tabs[tab] = TableSpecs(n._v_title, n.colnames, json.loads(n.attrs.units), int(n.nrows))
h5.close()
except:
pass
return tabs
def _get(d, k, default = None):
v = d.get(k)
if v:
return v.strip() if isinstance(v, str) else v
else:
return default
def get_binning(bins, data):
if np.isscalar(bins):
edges = np.linspace(np.nanmin(data), np.nanmax(data), bins + 1)
elif isseq(bins) and len(bins) == 3:
edges = np.linspace(bins[0], bins[1], bins[2] + 1)
else:
edges = np.array(bins)
centers = (edges[1:] + edges[:-1]) / 2
assert len(centers) == len(edges) - 1
widths = np.diff(edges)
return edges, centers, widths
def get_cumulative(bincontents, binerrors, cumulative = 0, binwidths = 1):
cumulative = float(cumulative)
if cumulative > 0:
bincontents = np.cumsum(bincontents * binwidths)
binerrors = np.sqrt(np.cumsum((binerrors * binwidths) ** 2))
elif cumulative < 0:
bincontents = np.sum(bincontents * binwidths) - np.cumsum(bincontents * binwidths)
binerrors = np.sqrt(np.sum((binerrors * binwidths) ** 2) - np.cumsum((binerrors * binwidths) ** 2))
return bincontents, binerrors
def get_density(bincontents, binerrors, binwidths):
f = 1.0 / (np.sum(bincontents) * binwidths)
bincontents = f * bincontents
bincontents[np.logical_not(np.isfinite(bincontents))] = 0
binerrors = f * binerrors
return bincontents, binerrors
def get_density2d(bincontents, xwidths, ywidths):
f = 1.0 / (np.sum(bincontents) * (xwidths * ywidths.reshape(len(ywidths), 1)))
bincontents = f * bincontents
bincontents[np.logical_not(np.isfinite(bincontents))] = 0
return bincontents
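# Minimal sketch of get_density2d() (illustrative only, never called):
# normalising a 2D histogram so that the sum of bincontents * bin areas is 1.
# The y bins are expected along axis 0, hence the transpose of the counts.
def _example_density2d():
    x = np.random.randn(1000)
    y = np.random.randn(1000)
    xedges, xcenters, xwidths = get_binning(10, x)
    yedges, ycenters, ywidths = get_binning(10, y)
    counts, _, _ = np.histogram2d(x, y, bins=[xedges, yedges])
    density = get_density2d(counts.T.astype(float), xwidths, ywidths)
    return density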
def get_step_points(bincontents, binedges):
assert len(bincontents) + 1 == len(binedges)
x = np.zeros(2 * len(binedges), dtype = float)
y = np.zeros(x.shape, dtype = float)
x[::2] = binedges
x[1::2] = binedges
y[1:-1:2] = bincontents
y[2:-1:2] = bincontents
assert len(x) == len(y) == 2 * len(binedges)
return x, y
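# Sketch (not part of the original module): turning binned contents into the
# x/y arrays needed to draw a histogram outline with plt.plot().
def _example_step_plot():
    data = np.random.randn(1000)
    edges, centers, widths = get_binning(20, data)
    contents, _ = np.histogram(data, bins=edges)
    x, y = get_step_points(contents.astype(float), edges)
    plt.plot(x, y)  # the outline drops to zero at both outer edges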
def adjust_limits(xy, data, limits = None, marl = 0.05, maru = 0.05):
assert xy in ('x', 'y')
lim = getattr(plt, xy + 'lim')
if limits is None:
limits = lim()
mi, ma = limits
data = data[np.isfinite(data)]
mind = np.min(data)
maxd = np.max(data)
span = maxd - mind
    lim(min(mi, mind - marl * span), max(ma, maxd + maru * span))
def sproduct(a, b):
for x, y in product(a, b):
yield '{}{}'.format(x, y)
text_poss = map(np.array, [(1, -1), (-1, -1), (-1, 1), (1, 1), (0.5, -1), (-1, 0.5), (0.5, 1), (1, 0.5)])
text_algn = [('left', 'top'), ('right', 'top'), ('right', 'bottom'), ('left', 'bottom'), ('center', 'top'), ('right', 'center'), ('center', 'bottom'), ('left', 'center')]
stats_abrv = {'n':'N', 'u':'uflow', 'o':'oflow', 'm':'mean', 's':'std', 'p':'mode', 'e':'median', 'w':'skew', 'k':'kurtos', 'x':'excess', 'c':'cov'}
class Plot(object):
def __init__(self, config , **kwargs):
log.debug('config %s', json.dumps(config))
log.debug('settings %s', json.dumps(kwargs))
self.config = config
        # configure plot according to kwargs
# all options are strings
for N in xrange(10):
n = str(N)
# x, y, z, cut, mode, source, name
# x = expression plotted on x-axis
# y = expression plotted on y-axis
# z = expression plotted on z-axis (as color of line/markers)
# c = cut expression, discard data point for which this is False (if given)
# s = HDF5 sourcefile and table
# n = name of the plot, used in legend
for v in 'xyzcmsn':
self._append(v, _get(kwargs, v + n))
# twin axes
self._append('tw', _get(kwargs, 'tw' + n))
# window, shift, condition for the rate calculation
self._append('rw', _get(kwargs, 'rw' + n))
self._append('rs', _get(kwargs, 'rs' + n, '1'))
self._append('rc', _get(kwargs, 'rc' + n, '1'))
# statsbox
self._append('sb', _get(kwargs, 'sb' + n, 'nmsc'))
# fit function
self._append('ff', _get(kwargs, 'ff' + n))
self._append('fp', _get(kwargs, 'fp' + n))
self._append('fl', _get(kwargs, 'fl' + n))
# x-, y- and z-adjustment expression
for v, w in product('xyz', 'a'):
self._append(v + w, _get(kwargs, v + n + w))
# x- and y-binnings for histograms/profile
for v, w in product('xy', 'b'):
self._append(v + w, _get(kwargs, v + n + w))
# plot options
for k, v in kwargs.iteritems():
if k.startswith('o' + n) and v:
a = 'o' + k[2:]
if not hasattr(self, a):
setattr(self, a, 10 * [None])
getattr(self, a)[N] = v.strip()
# range, scale, label
for v in sproduct('xyz', 'rsl'):
setattr(self, v , _get(kwargs, v))
for v in sproduct('xyz', 'rsl'):
setattr(self, v + 'tw', _get(kwargs, v + 'tw'))
# title, fontsize, width,height, grid, legend
for v in 'tfwhgl':
setattr(self, v, _get(kwargs, v))
# source with rate averaging
for i, s in enumerate(self.s):
self._append('sr', '{}:{}:{}:{}'.format(path.join(config['datadir'], s), self.rw[i], self.rs[i], self.rc[i]) if s else None)
self.legend = []
self.textboxes = []
self.fitboxes = []
self.progress = 0 # reaching from 0 to 1
self.axes = OrderedDict()
def _append(self, varname, value):
'append value to self.varname'
try:
getattr(self, varname).append(value)
except AttributeError:
setattr(self, varname, [value])
def _get(self, var, default = None, dtype = lambda x:x):
val = getattr(self, var)
if val is None:
return default
else:
return dtype(val)
def _prepare_data(self):
# create dict: source --> all expr for this source
# prefilled with empty lists
expr_data = {}
joined_cuts = {} # OR of all cuts
for n, s in enumerate(self.sr):
if s:
if s not in expr_data:
expr_data[s] = {} # add dict for every unique source s (plot n)
for v in ['x', 'y', 'z', 'c', 'xa', 'ya', 'za']:
expr = getattr(self, v)[n] # x/y/z/c expression for source s (plot n)
log.debug('{}{}, expr: {}'.format(v, n, expr))
if expr:
expr_data[s][expr] = []
if v == 'c':
if s in joined_cuts:
joined_cuts[s] = '{} or ({})'.format(joined_cuts[s], expr)
else:
joined_cuts[s] = '({})'.format(expr)
for s in joined_cuts.keys():
if '(None)' in joined_cuts[s]: del joined_cuts[s]
log.debug('joined_cuts = {}'.format(joined_cuts))
# loop over tables and fill data lists in expr_data
units = {}
self._get_data(expr_data, joined_cuts, units)
log.debug(units)
# assing data arrays to x/y/z/c-data fields
for v in ['x', 'y', 'z', 'c', 'xa', 'ya', 'za']:
setattr(self, v + 'data', [(expr_data[self.sr[i]][x] if x and self.sr[i] else None) for i, x in enumerate(getattr(self, v))])
setattr(self, v + 'unit', [(units[self.sr[i]][x] if x and self.sr[i] else None) for i, x in enumerate(getattr(self, v))])
log.debug('source={}'.format(self.s))
log.debug('srcavg={}'.format(self.sr))
for v in ['x', 'y', 'z', 'c', 'xa', 'ya', 'za']:
log.debug(' {}data {}'.format(v, [len(x) if x is not None else None for x in getattr(self, v + 'data')]))
# log.debug(' {}unit {}'.format(v, [x for x in getattr(self, v + 'unit')]))
def _get_data(self, expr_data, filters, units = {}):
# evaluate expressions for each source
for s, exprs in expr_data.iteritems():
log.debug('processing source {}'.format(s))
log.debug(' expressions {}'.format(exprs.keys()))
log.debug(' filter {}'.format(filters[s] if s in filters else None))
progr_prev = self.progress
# source s has form 'filename:/path/to/table'
# open HDF5 table
ss = s.strip().split(':')
with tables.openFile(ss[0], 'r') as h5:
table = h5.getNode(ss[1])
window = float(eval(ss[2])) if ss[2] != 'None' else None
shift = float(ss[3]) if ss[3] != 'None' else 1
weight = ss[4] if ss[4] != 'None' else None
progr_factor = 1.0 / table.nrows / len(expr_data)
table_units = tuple(json.loads(table.attrs.units))
def unit(var):
try:
return table_units[table.colnames.index(var.strip())]
except:
return '?'
units[s] = dict([(e, unit(e)) for e in exprs.keys()])
def compile_function(x):
fields = set(table.colnames)
fields.add('rate')
fields.add('count')
fields.add('weight')
for v in fields: # map T_a --> row['T_a'], etc.
x = re.sub('(?<!\\w)' + re.escape(v) + '(?!\\w)',
'row["' + v + '"]', x)
return eval('lambda row: ({})'.format(x))
# compile the expressions
exprs = dict([(compile_function(e), d) for e, d in exprs.iteritems()])
def average():
# look if there is data for this source in the cache
cachedir = self.config['cachedir'] or gettempdir()
cachefile = os.path.join(cachedir, 'avg{}.h5'.format(hashargs(s)))
cachefile = os.path.abspath(cachefile)
log.debug('cachefile %s', cachefile)
def average_cached():
if not self.config['cachedir']:
                            raise # always fail if the cache is disabled
with tables.openFile(cachefile) as cacheh5:
cachetable = cacheh5.getNode('/data')
progr_factor = 1.0 / cachetable.nrows / len(expr_data)
log.info('reading averaged data from cache')
for row in cachetable.iterrows():
self.progress = progr_prev + row.nrow * progr_factor
yield row
def average_computed():
try:
log.debug('creating averaged data cachefile')
cacheh5 = tables.openFile(cachefile, 'w')
except:
log.exception('failed opening %s', cachefile)
raise RuntimeError('cache for {} in use or corrupt, try again in a few seconds'.format(s))
with cacheh5:
                            # use the table's column descriptor and append the fields count, weight and rate
log.debug('caching averaged data')
coldesc = OrderedDict() # keep the order
for k in table.colnames:
d = table.coldescrs[k]
                                if isinstance(d, tables.BoolCol): # convert bool to float for averaging
coldesc[k] = tables.FloatCol(pos = len(coldesc))
else:
coldesc[k] = d
coldesc['count'] = tables.IntCol(pos = len(coldesc))
coldesc['weight'] = tables.FloatCol(pos = len(coldesc))
coldesc['rate'] = tables.FloatCol(pos = len(coldesc))
cachetable = cacheh5.createTable('/', 'data', coldesc, 'cached data')
cachetable.attrs.source = s
cacherow = cachetable.row
assert 0 < shift <= 1
it = table.colnames.index('time') # index of time column
ta = table[0][it] # window left edge
tb = ta + window # window right edge
wd = [] # window data
cols = table.colnames
wdlen = len(cols) + 1
fweight = compile_function(weight)
def append(r):
wd.append(np.fromiter(chain(r[:], [fweight(r)]), dtype = np.float, count = wdlen))
progr_factor = 1.0 / table.nrows / len(expr_data)
for row in table.iterrows():
if row[it] < tb: # add row if in window
append(row)
else: # calculate av and shift window
n = len(wd)
if n > 0:
wdsum = reduce(lambda a, b: a + b, wd)
for i, c in enumerate(cols):
cacherow[c] = wdsum[i] / n
cacherow['time'] = (ta + tb) * 0.5 # overwrite with interval center
cacherow['count'] = n
cacherow['weight'] = wdsum[-1] / n
cacherow['rate'] = n / window
self.progress = progr_prev + row.nrow * progr_factor
yield cacherow
cacherow.append()
ta += shift * window # shift window
tb = ta + window
if row[it] >= tb:
ta = row[it] # shift window
tb = ta + window
if shift == 1: # windows must be empty
wd = []
else: # remove data outside new window
wd = filter(lambda x: ta <= x[it] < tb, wd)
append(row)
if not self.config['cachedir']:
log.debug('removing averaged data cachefile')
os.remove(cachefile)
try: # try using data from cache
for x in average_cached(): yield x
except: # if cache fails
with lock_file(cachefile + '.lock'):
try: # try cache again (maybe it was populated while waiting for the lock)
for x in average_cached(): yield x
except: # if it fails again, compute the data
for x in average_computed(): yield x
def prefilter(data, filterexpr):
filterexpr = compile_function(filterexpr)
for row in data:
if filterexpr(row):
yield row
def updateProgress(row, fac):
self.progress = progr_prev + row.nrow * fac
if window:
tableiter = average()
updateProgress = noop # progress update is done inside average()
else:
tableiter = table.iterrows()
if s in filters:
tableiter = prefilter(tableiter, filters[s])
for row in tableiter:
for expr, data in exprs.iteritems():
data.append(expr(row))
if row.nrow % 10000 == 0: updateProgress(row, progr_factor)
# convert data lists to numpy arrays
d = expr_data[s]
for k in d.keys():
d[k] = np.array(d[k])
# done with getting data
self.progress = 1
__tick_density = 1.5
def _configure_pre(self):
# configure plotlib
plt.clf()
plt.close('all')
self.f = self._get('f', 10, float)
if 'map' in self.m: self.f *= 0.8 # smaller font if plotting map
plt.rc('font', **{'family':'sans-serif', 'sans-serif':['Dejavu Sans'], 'size':self.f})
# plt.rc('axes', grid = True)
plt.rc('lines', markeredgewidth = 0)
w = self._get('w', 25, float)
h = self._get('h', w / np.sqrt(2), float)
# convert cm to inches
w = w / 2.54
h = h / 2.54
self.w = w
self.h = h
plt.gcf().set_size_inches([w, h], forward = True);
# f = 0.09
# if 'map' in self.m: f = 0.06 # more margin if plotting map
# plt.gca().set_position([f, f, 1 - 2 * f, 1 - 2 * f])
# plt.subplots_adjust(left = f, bottom = f, right = 1 - f, top = 1 - f, wspace = 0, hspace = 0)
ticks.set_extended_locator(self.__tick_density)
self.axes[''] = plt.gca()
def _configure_post(self):
plt.axes(self.axes['']) # activate main axes
# title
if self.t: plt.title(self.t, fontsize = 1.4 * self.f)
if 'map' in self.m: return
# settings for main and twin axes
for v, ax in self.axes.iteritems():
plt.axes(ax)
# grid
plt.grid(which = 'major', axis = v or 'both', linestyle = '--' if v else '-', color = 'k', alpha = 0.4)
plt.grid(which = 'minor', axis = v or 'both', linestyle = '-.' if v else ':', color = 'k', alpha = 0.4)
# set labels, scales and ranges
for a in 'xy':
if v and a != v: continue # on twins, set only axis
getattr(plt, '{}label'.format(a))(self.alabel(a, v)) # label
s = getattr(self, a + 's' + ('tw' if a == v else ''))
if s: # scale
getattr(plt, '{}scale'.format(a))(s)
r = getattr(self, a + 'r' + ('tw' if a == v else ''))
if r: # range (limits)
rmin, rmax = r.split(',')
rlim = getattr(plt, '{}lim'.format(a))
# defaults
rmind, rmaxd = rlim()
# set range
try:
rmin = rmind if rmin == '' else float(rmin)
rmax = rmaxd if rmax == '' else float(rmax)
log.debug('rmin={}, rmax={}'.format(rmin, rmax))
rlim(rmin, rmax)
except ValueError:
                        # ignore if input is not a float
pass
# legend
plt.axes(self.axes.values()[-1]) # activate last added axes
if self.l != 'none' and 'map' not in self.m :
lines = [v[0] for v in self.legend]
labels = [v[1] for v in self.legend]
leg = plt.legend(lines, labels, loc = self.l or 'best', fancybox = True, numpoints = 1)
plt.setp(leg.get_texts(), fontsize = self.f)
leg.get_frame().set_alpha(0.8)
# get plot size to position textboxes
fig = plt.gcf()
sx, sy = fig.get_size_inches() * fig.dpi
# draw textboxes
cxw = 0
cx = sx
# add offset if we have a second y-axis
for tw in self.tw:
if tw == 'y':
cx += 50
break
# add offset if we have a z-axis
# only if offset hasn't been added yet
if cx == sx:
for z in self.z:
if z != None:
cx += 50
break
cy = sy
for i, t in enumerate(self.textboxes):
label = plt.annotate(t, (cx, cy), xycoords = 'axes pixels',
family = 'monospace', size = 'small',
horizontalalignment = 'left', verticalalignment = 'top',
bbox = dict(facecolor = 'w', alpha = 0.8, boxstyle = "round,pad=0.5"),
annotation_clip = False)
extents = label.get_window_extent(fig.canvas.get_renderer()).extents
w = extents[2] - extents[0]
if w > cxw:
cxw = w
cy -= sy * 0.25
# draw fitboxes
cx = cxw + cx + 50 if len(self.textboxes) else cx
cy = sy
for i, t in enumerate(self.fitboxes):
plt.annotate(t, (cx, cy), xycoords = 'axes pixels',
family = 'monospace', size = 'small',
horizontalalignment = 'left', verticalalignment = 'top',
bbox = dict(facecolor = 'w', alpha = 0.8, boxstyle = "round,pad=0.5"),
annotation_clip = False)
cy -= sy * 0.25
def data(self, i):
x, y, z, c = self.xdata[i], self.ydata[i], self.zdata[i], self.cdata[i]
xa, ya, za = self.xadata[i], self.yadata[i], self.zadata[i]
if xa is not None:
x = xa
if ya is not None:
y = ya
if za is not None:
z = za
if c is not None and len(c) > 0:
if x is not None: x = x[c]
if y is not None: y = y[c]
if z is not None: z = z[c]
return x, y, z
def opts(self, i):
o = {}
for k, v in self.__dict__.iteritems():
if k.startswith('o') and v[i] is not None:
log.debug('v[{}]={}'.format(i, v[i]))
log.debug('k[]={}'.format(k))
try:
o[k[1:]] = float(v[i])
except:
o[k[1:]] = v[i]
return o
def bins(self, i, a):
try:
b = getattr(self, a + 'b')[i]
if b:
bn = b.split(',')
if len(bn) == 1:
return float(bn[0])
return tuple([float(x) for x in bn])
else:
raise
except:
return 0
def llabel(self, i):
l = self.n[i]
if l: return l
l = ''
for v in 'xyzc':
w = getattr(self, v)[i]
if w: l += u':{}'.format(w)
return l[1:]
def alabel(self, a, t = ''):
l = getattr(self, a + ('ltw' if t == a else 'l'))
if l:
return l
l = u''
exprs = getattr(self, a)
units = getattr(self, a + 'unit')
adjust_funcs = getattr(self, a + 'a')
for i, x in enumerate(exprs):
if t and self.tw[i] != a: continue
if not t and self.tw[i] == a: continue
if x:
if x not in l:
l += u', {} [{}]'.format(x, units[i])
if adjust_funcs[i]:
l += u' ({})'.format(adjust_funcs[i])
return l[2:]
def plot(self):
self._prepare_data()
self._configure_pre()
for i, m in enumerate(self.m):
if m and self.s[i]:
self.selectAxes(i)
if m == 'xy':
self._xy(i)
elif m == 'h1':
self._hist1d(i)
elif m == 'h2':
self._hist2d(i)
elif m == 'p':
self._profile(i)
elif m == 'map':
self._map(i)
else:
                    raise RuntimeError('unknown mode ' + m)
self._configure_post()
def show(self):
log.debug('showing plot in interactive mode')
if not any(self.legend):
self.plot()
plt.show()
def save(self, name = 'fig', extensions = ('png', 'pdf', 'svg')):
plt.ioff()
if not any(self.legend):
self.plot()
names = []
for ext in extensions:
n = name + '.' + ext
log.debug('saving plot to %s', n)
plt.savefig(n, bbox_inches = 'tight', pad_inches = 0.5 if 'map' in self.m else 0.1, transparent = False)
names.append(n)
return dict(zip(extensions, names))
__twin = {'x':plt.twiny, 'y':plt.twinx}
def selectAxes(self, i):
plt.axes(self.axes['']) # activate main axes
v = self.tw[i]
if v and v in 'xy':
if v in self.axes:
plt.axes(self.axes[v]) # activate twin x/y axes
else:
self.axes[v] = self.__twin[v]() # create twin x/y axes
ticks.set_extended_locator(self.__tick_density) # add tick locator
return
def fit(self, i, x, y, yerr = None):
ff = self.ff[i]
if ff:
ff = ff.replace(' ', '')
log.info('fitting function {}'.format(ff))
fitfunc = eval('lambda x,*p:' + ff)
x, y = np.array(x), np.array(y)
m = np.logical_and(np.isfinite(x), np.isfinite(y))
if yerr is not None:
yerr = np.array(yerr)
m = np.logical_and(m, np.isfinite(yerr))
yerr = yerr[m]
x , y = x[m], y[m]
# gather fit parameters
p = tuple([float(fp) for fp in self.fp[i].split(',')])
try:
p, c = curve_fit(fitfunc, x, y, p, yerr)
log.info('parameters = {}'.format(p))
log.info('covariance = {}'.format(c))
fit_status = ''
except Exception as e:
fit_status = ' (failed: {})'.format(e)
c = None
log.exception('fit failed')
# plot fit result
xfit = np.linspace(np.nanmin(x), np.nanmax(x), 1000)
yfit = fitfunc(xfit, *p)
args = [xfit, yfit]
if self.fl[i]: args.append(self.fl[i])
l, = plt.plot(*args)
N = len(x)
chi2 = fitfunc(x, *p) - y
if yerr is not None:
chi2 = chi2 / yerr
chi2 = (chi2 ** 2).sum()
# add textbox
t = 'y=' + ff
t += '\n$\\chi^2$/N = {}/{}'.format(number_mathformat(chi2), number_mathformat(N))
for k, v in enumerate(p):
try:
t += '\np[{}] = {}$\\pm${}'.format(k, number_mathformat(v), number_mathformat(np.sqrt(c[k, k])))
except:
t += '\np[{}] = {}$\\pm${}'.format(k, v, c)
self.fitboxes.append(t)
ll = ('Fit' + fit_status + ' y=' + ff)
for k, v in enumerate(p):
ll = ll.replace('p[{}]'.format(k), number_mathformat(v, 3))
self.legend.append((l, ll))
def _xy(self, i):
log.debug('xy plot of {}'.format([getattr(self, v)[i] for v in 'sxyzc']))
kwargs = self.opts(i)
x, y, z = self.data(i)
if x is not None:
args = (x, y)
else:
args = (y,)
if z is None:
l, = plt.plot(*args, **kwargs)
else:
# linestyle must not be 'none' when plotting 3D
if 'linestyle' in kwargs and kwargs['linestyle'] == 'none':
kwargs['linestyle'] = ':'
o = get_args_from(kwargs, markersize = 2, cbfrac = 0.04, cblabel = self.alabel('z'))
l = plt.scatter(x, y, c = z, s = o.markersize ** 2, edgecolor = 'none', **kwargs)
m = 6.0
dmin, dmax = np.nanmin(z), np.nanmax(z)
cticks = ticks.get_ticks(dmin, dmax, m, only_inside = 1)
formatter = mpl.ticker.FuncFormatter(func = lambda x, i:number_mathformat(x))
cb = plt.colorbar(fraction = o.cbfrac, pad = 0.01, aspect = 40, ticks = cticks, format = formatter)
cb.set_label(o.cblabel)
self.legend.append((l, self.llabel(i)))
# fit
self.fit(i, x, y)
def _hist1d(self, i):
self.plotted_lines = []
log.debug('1D histogram of {}'.format([getattr(self, v)[i] for v in 'sxyzc']))
kwargs = self.opts(i)
x, y, z = self.data(i)
o = get_args_from(kwargs, density = False, cumulative = 0)
o.update(get_args_from(kwargs, style = 'histline' if o.density else 'hist'))
err = 0 # o.style.startswith('s')
o.update(get_args_from(kwargs, xerr = err, yerr = err, capsize = 3 if err else 0))
bins = self.bins(i, 'x')
if bins == 0:
bins = int(1 + np.log2(len(x)))
binedges, bincenters, binwidths = get_binning(bins, x)
bincontents, _d1 = np.histogram(x, binedges)
assert np.all(binedges == _d1)
binerrors = np.sqrt(bincontents)
binerrors[binerrors == 0] = 1
# statsbox
self.stats_fields1d(i, x, bincontents, binerrors, binedges)
if o.density:
bincontents, binerrors = get_density(bincontents, binerrors, binwidths)
if o.cumulative:
bincontents, binerrors = get_cumulative(bincontents, binerrors, o.cumulative, binwidths if o.density else 1)
if 'line' in o.style:
x = bincenters
y = bincontents
else:
x, y = get_step_points(bincontents, binedges)
if 'fill' in o.style:
l, = plt.fill(x, y, **kwargs)
elif 'hist' in o.style:
l, = plt.plot(x, y, **kwargs)
elif 'scat' in o.style:
pargs = set_defaults(kwargs, linestyle = '', marker = '.')
l, = plt.plot(bincenters, bincontents, **pargs)
else:
raise ValueError('unknown style: ' + o.style)
if o.xerr or o.yerr:
pargs = set_defaults(kwargs, capsize = o.capsize, ecolor = 'k' if 'fill' in o.style else l.get_c())
xerr = 0.5 * binwidths if o.xerr else None
yerr = binerrors if o.yerr else None
plt.errorbar(bincenters, bincontents, yerr, xerr, fmt = None, **pargs)
adjust_limits('x', binedges)
adjust_limits('y', bincontents + binerrors, marl = 0)
self.legend.append((l, self.llabel(i)))
self.fit(i, bincenters, bincontents, binerrors)
def _hist2d(self, i):
log.debug('2D histogram of {}'.format([getattr(self, v)[i] for v in 'sxyzc']))
kwargs = self.opts(i)
x, y, z = self.data(i)
o = get_args_from(kwargs, style = 'color', density = False, log = False, cbfrac = 0.04, cblabel = 'bincontent', levels = 10)
filled = 'color' in o.style or ('fill' in o.style)
o.update(get_args_from(kwargs, hidezero = o.log or filled, colorbar = filled, clabels = not filled))
# make binnings
bins = self.bins(i, 'x')
if bins == 0:
bins = int(1 + np.log2(len(x)))
xedges, xcenters, xwidths = get_binning(bins, x)
bins = self.bins(i, 'y')
if bins == 0:
bins = int(1 + np.log2(len(y)))
yedges, ycenters, ywidths = get_binning(bins, y)
bincontents, _d1, _d2 = np.histogram2d(x, y, [xedges, yedges])
bincontents = np.transpose(bincontents)
assert np.all(_d1 == xedges)
assert np.all(_d2 == yedges)
# statsbox
self.stats_fields2d(i, bincontents, xcenters, ycenters)
if o.density:
bincontents = get_density2d(bincontents, xwidths, ywidths)
if o.hidezero:
bincontents[bincontents == 0] = np.nan
if o.log:
bincontents = np.log10(bincontents)
formatter = mpl.ticker.FuncFormatter(func = lambda x, i:number_mathformat(np.power(10, x)))
else:
formatter = mpl.ticker.FuncFormatter(func = lambda x, i:number_mathformat(x))
if 'color' in o.style:
pargs = set_defaults(kwargs, cmap = 'jet', edgecolor = 'none')
            plt.pcolor(xedges, yedges, ma.array(bincontents, mask = np.isnan(bincontents)), **pargs)
elif 'box' in o.style:
pargs = set_defaults(kwargs, color = (1, 1, 1, 0), marker = 's', edgecolor = 'k')
n = bincontents.size
s = bincontents.reshape(n)
s = s / np.nanmax(s) * (72. / 2. * self.w / max(len(xcenters), len(ycenters))) ** 2
xcenters, ycenters = np.meshgrid(xcenters, ycenters)
plt.scatter(xcenters.reshape(n), ycenters.reshape(n), s = s, **pargs)
elif 'contour' in o.style:
pargs = set_defaults(kwargs, cmap = 'jet')
if not isinstance(pargs['cmap'], mpl.colors.Colormap):
pargs['cmap'] = mpl.cm.get_cmap(pargs['cmap'])
if filled:
cs = plt.contourf(xcenters, ycenters, bincontents, o.levels, **pargs)
else:
cs = plt.contour(xcenters, ycenters, bincontents, o.levels, **pargs)
if o.clabels:
plt.clabel(cs, inline = 1)
else:
raise ValueError('unknown style ' + o.style)
if o.colorbar:
m = 6.0
dmin, dmax = np.nanmin(bincontents), np.nanmax(bincontents)
if o.log:
dmin, dmax = np.ceil(dmin), np.floor(dmax) + 1
step = max(1, np.floor((dmax - dmin) / m))
cticks = np.arange(dmin, dmax, step)
else:
cticks = ticks.get_ticks(dmin, dmax, m, only_inside = 1)
cb = plt.colorbar(fraction = o.cbfrac, pad = 0.01, aspect = 40, ticks = cticks, format = formatter)
cb.set_label(o.cblabel)
def _profile(self, i):
log.debug('profile of {}'.format([getattr(self, v)[i] for v in 'sxyzc']))
kwargs = self.opts(i)
x, y, z = self.data(i)
o = get_args_from(kwargs, xerr = 0, yerr = 0)
# make x binning
xedges, xcenters, xwidths = get_binning(self.bins(i, 'x'), x)
# compute avg and std for each x bin
xx = xcenters
xerr = 0.5 * xwidths if o.xerr else None
yy = []
yerr = []
for l, u in zip(xedges[:-1], xedges[1:]):
bindata = y[(l <= x) & (x < u)]
yy.append(np.mean(bindata))
yerr.append(np.std(bindata))
if not o.yerr:
yerr = None
pargs = set_defaults(kwargs, capsize = 3, marker = '.', linestyle = 'none')
l, _d, _d = plt.errorbar(xx, yy, yerr, xerr, **pargs)
self.legend.append((l, self.llabel(i)))
self.fit(i, xx, yy, yerr)
def _map(self, i):
import maps
log.debug('map of {}'.format([getattr(self, v)[i] for v in 'sxyzc']))
kwargs = self.opts(i)
x, y, z = self.data(i)
o = get_args_from(kwargs, margin = 0.05, width = 10e6, height = None, boundarylat = 50, projection = 'cyl',
drawcoastline = 1, drawgrid = 1, drawspecgrid = 1, drawcountries = 0, bluemarble = 0, nightshade = None)
m = maps.drawmap(y, x, **o)
x, y = m(x, y)
if z is None:
l, = plt.plot(x, y, **kwargs)
else:
# linestyle must not be 'none' when plotting 3D
if 'linestyle' in kwargs and kwargs['linestyle'] == 'none':
kwargs['linestyle'] = ':'
o = get_args_from(kwargs, markersize = 6, cbfrac = 0.04, cblabel = self.alabel('z'))
p = set_defaults(kwargs, zorder = 100)
l = plt.scatter(x, y, c = z, s = o.markersize ** 2, edgecolor = 'none', **p)
m = 6.0
dmin, dmax = np.nanmin(z), np.nanmax(z)
cticks = ticks.get_ticks(dmin, dmax, m, only_inside = 1)
formatter = mpl.ticker.FuncFormatter(func = lambda x, i:number_mathformat(x))
cb = plt.colorbar(fraction = o.cbfrac, pad = 0.01, aspect = 40, ticks = cticks, format = formatter)
cb.set_label(o.cblabel)
self.legend.append((l, self.llabel(i)))
def stats_fields1d(self, i, data, contents, errors, edges):
centers = (edges[1:] + edges[:-1]) / 2
widths = np.diff(edges)
stats = {}
stats['N'] = N = np.sum(contents)
stats['uflow'] = np.sum(data < edges[0])
stats['oflow'] = np.sum(edges[-1] < data)
stats['mean'] = mean = np.sum(centers * contents) / N
stats['std'] = std = np.sqrt(np.sum((centers - mean) ** 2 * contents) / N)
stats['mode'] = centers[np.argmax(contents)]
bc, be = get_density(contents, errors, widths)
bc, be = get_cumulative(bc, be, 1, widths)
median_i = np.minimum(len(centers)-1, np.searchsorted(bc, 0.5, side = 'right'))
stats['median'] = median = centers[median_i]
        if len(centers) % 2 == 0: # even number of bins
stats['median'] = median = (median + centers[median_i - 1]) / 2
stats['skew'] = np.sum(((centers - mean) / std) ** 3 * contents) / N
stats['kurtos'] = kurtosis = np.sum(((centers - mean) / std) ** 4 * contents) / N
stats['excess'] = kurtosis - 3
log.debug(stats)
text = '{:6} {}'.format('hist', self.llabel(i))
sb = self.sb[i]
if 'a' in sb: sb = 'nmscpewx'
if 'uflow' in stats and stats['uflow']: sb += 'u'
if 'oflow' in stats and stats['oflow']: sb += 'o'
for k in sb:
k = stats_abrv[k]
if k in stats:
text += '\n{:6} {}'.format(_(k), number_mathformat(stats[k]))
self.textboxes.append(text)
def stats_fields2d(self, i, contents, xcenters, ycenters):
stats = {}
stats['N'] = N = contents.sum()
stats['mean'] = mean = np.array([ (contents.sum(axis = 0) * xcenters).sum(),
(contents.sum(axis = 1) * ycenters).sum()]) / N
stats['std'] = np.sqrt(np.array([(contents.sum(axis = 0) * (xcenters - mean[0]) ** 2).sum(),
(contents.sum(axis = 1) * (ycenters - mean[1]) ** 2).sum()]) / N)
cov = 0
for k, l in product(xrange(contents.shape[1]), xrange(contents.shape[0])):
cov += contents[l, k] * (xcenters[k] - mean[0]) * (ycenters[l] - mean[1])
stats['cov'] = cov / N
log.debug(stats)
text = '{:6} {}'.format('hist', self.llabel(i))
sb = self.sb[i]
if 'a' in sb: sb = 'nmscpewx'
if 'uflow' in stats and stats['uflow']: sb += 'u'
if 'oflow' in stats and stats['oflow']: sb += 'o'
for k in sb:
k = stats_abrv[k]
if k in stats:
v = stats[k]
try:
v = number_mathformat(v)
except:
v = '({})'.format(','.join(map(number_mathformat, v)))
text += '\n{:6} {}'.format(_(k), v)
self.textboxes.append(text)
def display_progress(p):
"display a progressbar on stdout by reading p.progress"
from threading import Thread
from progressbar import ProgressBar, Bar, Percentage, ETA
def progressUpdate():
pb = ProgressBar(maxval = 1, widgets = [Bar(), ' ', Percentage(), ' ', ETA()], fd = sys.stdout)
while p.progress < 1:
pb.update(p.progress)
time.sleep(0.5)
pb.finish()
t = Thread(target = progressUpdate)
t.daemon = True
t.start()
def display_settings_help():
print '''available plot settings
===========================
Settings containing a # may appear multiple times, once per graph.
The # is to be replaced by an integer starting with 0.
t
w
h
s#
m#
n#
rw#
rs#
rc#
x#
x#b
o#xerr
y#
o#yerr
c#
ff#
fp#
fl#
o#color
o#alpha
o#linestyle
o#linewidth
o#marker
o#markersize
o#zorder
xl
xr
xs
xrtw
l
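for example (values borrowed from the sample settings in main() below):
  s0=../data/2013_NEUMAYER-nm-my.h5:/raw/PS_mu_nm_data m0=p x0=p x0b=30 y0=log10(mu_rate) rw0=3600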
'''
exit()
def main():
from argparse import ArgumentParser
import ctplot
def key_value_pair(s):
k, v = s.split('=', 1)
return k, v
parser = ArgumentParser(description = 'analyse and plot HDF5 table data', epilog = ctplot.__epilog__)
parser.add_argument('-H', '--help-settings', action = 'store_true', help = 'display help for settings')
parser.add_argument('-V', '--version', action = 'version', version = '%(prog)s {} build {}'.format(ctplot.__version__, ctplot.__build_date__))
parser.add_argument('-o', '--output', metavar = 'file', help = 'name of output file, show window if omitted')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'set logging level to DEBUG')
parser.add_argument('-q', '--quiet', action = 'store_true', help = 'set logging level to ERROR')
parser.add_argument('-c', '--cache', metavar = 'dir', help = 'dir where to store cached HDF5 tables, cache is deactivated if not set')
parser.add_argument('settings', metavar = 'K=V', nargs = '+', type = key_value_pair, help = 'plot settings, given as key value pairs')
settings = {"t":"", "w":"", "h":"", "experiment0":"neutron-mon-neumayer",
"s0":"../data/2013_NEUMAYER-nm-my.h5:/raw/PS_mu_nm_data", "m0":"p",
"n0":"", "rw0":"3600", "rs0":"", "rc0":"", "x0":"p", "x0b":"30", "o0xerr":"true", "y0":"log10(mu_rate)",
"o0yerr":"true", "c0":"time<7.65e7 and mu_rate==mu_rate", "ff0":"p[0]+p[1]*x", "fp0":"1.5, 0", "fl0":"b",
"o0color":"r", "o0alpha":"", "o0linestyle":"", "o0linewidth":"", "o0marker":"", "o0markersize":"12", "o0zorder":"",
"xl":"", "xr-min":"", "xr-max":"", "xr":"", "xs":"", "xrtw":"", "yl":"", "yr-min":"", "yr-max":"", "yr":"", "ys":"",
"yrtw":"", "zl":"", "zr-min":"", "zr-max":"", "zr":"", "zs":"", "l":"lower left", "a":"plot", "plots":1}
ss = ['{}={}'.format(k, v) for k, v in settings.items()]
ss.append('-h')
ss.append('-c..')
args = parser.parse_args()
if args.help_settings:
display_settings_help()
log.setLevel(logging.INFO)
if args.verbose:
log.setLevel(logging.DEBUG)
if args.quiet:
log.setLevel(logging.ERROR)
args.settings = dict(args.settings)
log.debug(args)
config = {'cachedir':''}
if args.cache:
config['cachedir'] = args.cache
p = Plot(config, **args.settings)
if not args.quiet:
display_progress(p)
if args.output:
p.save(args.output)
else:
p.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
sebastien-forestier/NIPS2017 | scripts/check_logs.py | 4 | 2837 |
import os
import pickle
import matplotlib.pyplot as plt
log_dir = "../data/logs/"
name = "motor_babbling_demos"
filename = os.path.join(log_dir, name + ".pickle")
with open(filename, 'r') as f:
data = pickle.load(f)
# print "\nsm_data", data["sm_data"]
# print "\nim_data", data["im_data"]
# print "\nchosen_modules", data["chosen_modules"]
# print "\nprogresses_evolution", data["progresses_evolution"]
# print "\ninterests_evolution", data["interests_evolution"]
# print "\nnormalized_interests_evolution", data["normalized_interests_evolution"]
#
#
#
# print data["normalized_interests_evolution"]
fig, ax = plt.subplots()
ax.plot(data["normalized_interests_evolution"], lw=2)
ax.legend(["Hand", "Joystick_1", "Joystick_2", "Ergo", "Ball", "Light", "Sound"], ncol=3)
ax.set_xlabel('Time steps', fontsize=20)
ax.set_ylabel('Learning progress', fontsize=20)
#plt.show(block=True)
fig, ax = plt.subplots()
plt.title("Hand")
for s in data["sm_data"]["mod1"][1]:
print s
x = s[range(0, 30, 3)]
y = s[range(1, 30, 3)]
z = s[range(2, 30, 3)]
ax.plot(x, color="r")
ax.plot(y, color="g")
ax.plot(z, color="b")
plt.legend(["x", "y", "z"])
ax.set_xlabel('Time steps', fontsize=20)
plt.legend()
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Joystick 1")
for s in data["sm_data"]["mod2"][1]:
x = s[range(0, 20, 2)]
y = s[range(1, 20, 2)]
ax.plot(x, color="r")
ax.plot(y, color="g")
plt.legend(["Forward", "LR"])
ax.set_xlabel('Time steps', fontsize=20)
plt.legend()
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Joystick 2")
for s in data["sm_data"]["mod3"][1]:
x = s[range(0, 20, 2)]
y = s[range(1, 20, 2)]
ax.plot(x, color="r")
ax.plot(y, color="g")
plt.legend(["LR", "Forward"])
ax.set_xlabel('Time steps', fontsize=20)
plt.legend()
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Ergo")
for s in data["sm_data"]["mod4"][1]:
x = s[range(1, 21, 2)]
y = s[range(2, 21, 2)]
ax.plot(x, color="r")
ax.plot(y, color="g")
plt.legend(["Angle", "Elongation"])
ax.set_xlabel('Time steps', fontsize=20)
plt.legend()
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Ball")
for s in data["sm_data"]["mod5"][1]:
x = s[range(2, 22, 2)]
y = s[range(3, 22, 2)]
ax.plot(x, color="r")
ax.plot(y, color="g")
plt.legend(["Angle", "Elongation"])
ax.set_xlabel('Time steps', fontsize=20)
plt.legend()
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Light")
for s in data["sm_data"]["mod6"][1]:
ax.plot(s[2:], color="r")
ax.set_xlabel('Time steps', fontsize=20)
plt.ylim([-1.1, 1.1])
fig, ax = plt.subplots()
plt.title("Sound")
for s in data["sm_data"]["mod7"][1]:
ax.plot(s[2:], color="r")
ax.set_xlabel('Time steps', fontsize=20)
plt.ylim([-1.1, 1.1])
plt.show(block=True)
| gpl-3.0 |
samzhang111/scikit-learn | examples/tree/unveil_tree_structure.py | 4 | 4825 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
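# For illustration only (a hypothetical stump, not the estimator fitted
# above): a root that splits on feature 2 with two leaf children would be
# stored roughly as children_left = [1, -1, -1], children_right = [2, -1, -1],
# feature = [2, -2, -2] and threshold = [2.45, -2.0, -2.0], where -1 and -2
# are the sentinel values used for leaves.
#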
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First, let's retrieve the decision path of each sample. The decision_path
# method returns the node indicator matrix: a non-zero element at position
# (i, j) indicates that sample i passes through node j.
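# As a sketch with hypothetical values: for a three-node tree (a root and two
# leaves), a test sample routed from the root to the right leaf would have the
# indicator row [1, 0, 1].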
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now it's possible to retrieve the tests that were used to predict a sample
# or a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # continue to the next node if it is a leaf node (a leaf carries no test)
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
empirical-org/WikipediaSentences | utils/qfragment/setup.py | 1 | 1078 | from setuptools import setup
setup(name='qfragment',
version='0.0.26',
description='Sentence fragment detection and feedback',
url='https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets',
author='Quill.org',
author_email='[email protected]',
license='MIT',
packages=['qfragment'],
install_requires=[
'Flask==0.12.2',
'SQLAlchemy==1.2.6',
'en_core_web_lg==2.0.0',
'nltk==3.2.5',
'numpy==1.14.2',
'pandas==0.19.2',
'pathlib==1.0.1',
'psycopg2==2.7.3.2',
'requests==2.18.4',
'spacy==2.0.10',
          'tensorflow==1.5.1',
          'textacy==0.6.1',
          'tflearn==0.3.2',
'thinc==6.10.2',
],
dependency_links = [
'https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-2.0.0/en_core_web_lg-2.0.0.tar.gz#egg=en_core_web_lg==2.0.0'
],
tests_require=['pytest'],
include_package_data=True,
zip_safe=False)
| agpl-3.0 |
RomainBrault/scikit-learn | sklearn/utils/tests/test_extmath.py | 19 | 24513 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
Xcsr = sparse.csr_matrix(X, dtype=dtype)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| bsd-3-clause |
jmuhlich/pysb | pysb/simulator/cupsoda.py | 5 | 27740 | from pysb.simulator.base import Simulator, SimulatorException, SimulationResult
import pysb
import pysb.bng
import numpy as np
from scipy.constants import N_A
import os
import re
import subprocess
import tempfile
import time
import logging
from pysb.logging import EXTENDED_DEBUG
import shutil
from pysb.pathfinder import get_path
import sympy
import collections
from collections.abc import Iterable
try:
import pandas as pd
except ImportError:
pd = None
try:
import pycuda.driver as cuda
except ImportError:
cuda = None
class CupSodaSimulator(Simulator):
"""An interface for running cupSODA, a CUDA implementation of LSODA.
cupSODA is a graphics processing unit (GPU)-based implementation of the
LSODA simulation algorithm (see references). It requires an NVIDIA GPU
card with support for the CUDA framework version 7 or above. Further
details of cupSODA and software can be found on github:
https://github.com/aresio/cupSODA
The simplest way to install cupSODA is to use a pre-compiled version,
which can be downloaded from here:
https://github.com/aresio/cupSODA/releases
Parameters
----------
model : pysb.Model
Model to integrate.
tspan : vector-like, optional
Time values at which the integrations are sampled. The first and last
values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
N_SIMS x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are N_SIMS x number of
parameters.
verbose : bool or int, optional
Verbosity level, see :class:`pysb.simulator.base.Simulator` for
further details.
**kwargs: dict, optional
Extra keyword arguments, including:
* ``gpu``: Index of GPU to run on (default: 0)
* ``vol``: System volume; required if model encoded in extrinsic
(number) units (default: None)
* ``obs_species_only``: Only output species contained in observables
(default: True)
* ``cleanup``: Delete all temporary files after the simulation is
finished. Includes both BioNetGen and cupSODA files. Useful for
debugging (default: True)
* ``prefix``: Prefix for the temporary directory containing cupSODA
input and output files (default: model name)
* ``base_dir``: Directory in which temporary directory with cupSODA
input and output files are placed (default: system directory
determined by `tempfile.mkdtemp`)
* ``integrator``: Name of the integrator to use; see
`default_integrator_options` (default: 'cupsoda')
* ``integrator_options``: A dictionary of keyword arguments to
supply to the integrator; see `default_integrator_options`.
Attributes
----------
model : pysb.Model
Model passed to the constructor.
tspan : numpy.ndarray
Time values passed to the constructor.
initials : numpy.ndarray
Initial species concentrations for all simulations. Dimensions are
number of simulations x number of species.
param_values : numpy.ndarray
Parameters for all simulations. Dimensions are number of simulations
x number of parameters.
verbose: bool or int
Verbosity setting. See the base class
:class:`pysb.simulator.base.Simulator` for further details.
gpu : int or list
Index of GPU being run on, or a list of integers to use multiple GPUs.
        Simulations will be split equally among the GPUs.
outdir : str
Directory where cupSODA output files are placed. Input files are
also placed here.
opts: dict
Dictionary of options for the integrator, which can include the
following:
* vol (float or None): System volume
* n_blocks (int or None): Number of GPU blocks used by the simulator
* atol (float): Absolute integrator tolerance
* rtol (float): Relative integrator tolerance
* chunksize (int or None): The maximum number of simulations to run
per GPU at one time. Set this option if your GPU is running out of
memory.
* memory_usage ('global', 'shared', or 'sharedconstant'): The type of
GPU memory to use
* max_steps (int): The maximum number of internal integrator iterations
(equivalent to LSODA's mxstep)
integrator : str
Name of the integrator in use (only "cupsoda" is supported).
Notes
-----
1. If `vol` is defined, species amounts and rate constants are assumed
to be in number units and are automatically converted to concentration
units before generating the cupSODA input files. The species
concentrations returned by cupSODA are converted back to number units
during loading.
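       (The conversion presumably has the form
       concentration = amount / (N_A * vol), with N_A Avogadro's constant.)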
2. If `obs_species_only` is True, only the species contained within
observables are output by cupSODA. All other concentrations are set
to 'nan'.
References
----------
1. Harris, L.A., Nobile, M.S., Pino, J.C., Lubbock, A.L.R., Besozzi, D.,
Mauri, G., Cazzaniga, P., and Lopez, C.F. 2017. GPU-powered model
analysis with PySB/cupSODA. Bioinformatics 33, pp.3492-3494.
2. Nobile M.S., Cazzaniga P., Besozzi D., Mauri G., 2014. GPU-accelerated
simulations of mass-action kinetics models with cupSODA, Journal of
Supercomputing, 69(1), pp.17-24.
3. Petzold, L., 1983. Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations. SIAM journal on
scientific and statistical computing, 4(1), pp.136-148.
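Examples
--------
An illustrative usage sketch (not taken from the original source): it
assumes cupSODA is installed and discoverable by PySB, a CUDA-capable GPU
is available, that :class:`CupSodaSimulator` is importable from
:mod:`pysb.simulator`, and it uses the Tyson oscillator example model
bundled with PySB.

>>> import numpy as np
>>> from pysb.examples.tyson_oscillator import model
>>> from pysb.simulator import CupSodaSimulator
>>> sim = CupSodaSimulator(model, tspan=np.linspace(0, 100, 101), gpu=0)
>>> simulation_result = sim.run()  # doctest: +SKIP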
"""
_supports = {'multi_initials': True, 'multi_param_values': True}
_memory_options = {'global': '0', 'shared': '1', 'sharedconstant': '2'}
default_integrator_options = {
# some sane default options for a few well-known integrators
'cupsoda': {
'max_steps': 20000, # max # of internal iterations (LSODA's MXSTEP)
'atol': 1e-8, # absolute tolerance
'rtol': 1e-8, # relative tolerance
'chunksize': None, # Max number of simulations per GPU per run
'n_blocks': None, # number of GPU blocks
'memory_usage': 'sharedconstant'}} # see _memory_options dict
_integrator_options_allowed = {'max_steps', 'atol', 'rtol', 'n_blocks',
'memory_usage', 'vol', 'chunksize'}
def __init__(self, model, tspan=None, initials=None, param_values=None,
verbose=False, **kwargs):
super(CupSodaSimulator, self).__init__(model, tspan=tspan,
initials=initials,
param_values=param_values,
verbose=verbose, **kwargs)
self.gpu = kwargs.pop('gpu', (0, ))
if not isinstance(self.gpu, Iterable):
self.gpu = [self.gpu]
self._obs_species_only = kwargs.pop('obs_species_only', True)
self._cleanup = kwargs.pop('cleanup', True)
self._prefix = kwargs.pop('prefix', self._model.name)
# Sanitize the directory - cupsoda doesn't handle spaces etc. well
self._prefix = re.sub('[^0-9a-zA-Z]', '_', self._prefix)
self._base_dir = kwargs.pop('base_dir', None)
self.integrator = kwargs.pop('integrator', 'cupsoda')
integrator_options = kwargs.pop('integrator_options', {})
if kwargs:
raise ValueError('Unknown keyword argument(s): {}'.format(
', '.join(kwargs.keys())
))
unknown_integrator_options = set(integrator_options.keys()).difference(
self._integrator_options_allowed
)
if unknown_integrator_options:
raise ValueError(
'Unknown integrator_options: {}. Allowed options: {}'.format(
', '.join(unknown_integrator_options),
', '.join(self._integrator_options_allowed)
)
)
# generate the equations for the model
pysb.bng.generate_equations(self._model, self._cleanup, self.verbose)
# build integrator options list from our defaults and any kwargs
# passed to this function
options = {}
if self.default_integrator_options.get(self.integrator):
options.update(self.default_integrator_options[
self.integrator]) # default options
else:
raise SimulatorException(
"Integrator type '" + self.integrator + "' not recognized.")
options.update(integrator_options) # overwrite
# defaults
self.opts = options
self._out_species = None
# private variables (to reduce the number of function calls)
self._len_rxns = len(self._model.reactions)
self._len_species = len(self._model.species)
self._len_params = len(self._model.parameters)
self._model_parameters_rules = self._model.parameters_rules()
# Set cupsoda verbosity level
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= EXTENDED_DEBUG:
self._cupsoda_verbose = 2
elif logger_level <= logging.DEBUG:
self._cupsoda_verbose = 1
else:
self._cupsoda_verbose = 0
# regex for extracting cupSODA reported running time
self._running_time_regex = re.compile(r'Running time:\s+(\d+\.\d+)')
def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
tout):
_indirs = {}
_outdirs = {}
p = {}
# Path to cupSODA executable
bin_path = get_path('cupsoda')
# Start simulations
for gpu in gpus:
_indirs[gpu] = os.path.join(outdir, "INPUT_GPU{}_{}".format(
gpu, chunk_idx))
os.mkdir(_indirs[gpu])
_outdirs[gpu] = os.path.join(outdir, "OUTPUT_GPU{}_{}".format(
gpu, chunk_idx))
# Create cupSODA input files
self._create_input_files(_indirs[gpu], sims[gpu], cmtx)
# Build command
# ./cupSODA input_model_folder blocks output_folder simulation_
# file_prefix gpu_number fitness_calculation memory_use dump
command = [bin_path, _indirs[gpu], str(self.n_blocks),
_outdirs[gpu], self._prefix, str(gpu),
'0', self._memory_usage, str(self._cupsoda_verbose)]
self._logger.info("Running cupSODA: " + ' '.join(command))
# Run simulation and return trajectories
p[gpu] = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Read results
for gpu in gpus:
(p_out, p_err) = p[gpu].communicate()
p_out = p_out.decode('utf-8')
p_err = p_err.decode('utf-8')
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= logging.INFO:
run_time_match = self._running_time_regex.search(p_out)
if run_time_match:
self._logger.info('cupSODA GPU {} chunk {} reported '
'time: {} seconds'.format(
gpu,
chunk_idx,
run_time_match.group(1)))
self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
gpu, chunk_idx, p_out))
if p_err:
self._logger.error('cupSODA GPU {} chunk {} '
'stderr:\n{}'.format(
gpu, chunk_idx, p_err))
if p[gpu].returncode:
raise SimulatorException(
"cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
gpu, chunk_idx, p_out.rstrip("at line"), p_err.rstrip()
)
)
tout_run, trajectories_run = self._load_trajectories(
_outdirs[gpu], sims[gpu])
if trajectories is None:
tout = tout_run
trajectories = trajectories_run
else:
tout = np.concatenate((tout, tout_run))
trajectories = np.concatenate(
(trajectories, trajectories_run))
return tout, trajectories
def run(self, tspan=None, initials=None, param_values=None):
"""Perform a set of integrations.
Returns a :class:`.SimulationResult` object.
Parameters
----------
tspan : list-like, optional
Time values at which the integrations are sampled. The first and
last values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
number of simulations x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are number of
simulations x number of parameters.
Returns
-------
A :class:`SimulationResult` object
Notes
-----
1. An exception is thrown if `tspan` is not defined in either
`__init__` or `run`.
2. If neither `initials` nor `param_values` are defined in either
`__init__` or `run`, a single simulation is run with the initial
concentrations and parameter values defined in the model (see the
example below).
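Example (an illustrative sketch, not taken from the original source; it
assumes a working cupSODA/GPU setup and that ``sim`` is a
:class:`CupSodaSimulator` instance with ``tspan`` already set):

>>> import numpy as np
>>> res_single = sim.run()  # doctest: +SKIP
>>> pvals = np.repeat([[p.value for p in sim.model.parameters]], 10, axis=0)
>>> res_multi = sim.run(param_values=pvals)  # 10 simulations  # doctest: +SKIP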
"""
super(CupSodaSimulator, self).run(tspan=tspan, initials=initials,
param_values=param_values,
_run_kwargs=[])
# Create directories for cupSODA input and output files
_outdirs = {}
_indirs = {}
start_time = time.time()
cmtx = self._get_cmatrix()
outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
dir=self._base_dir)
self._logger.debug("Output directory is %s" % outdir)
# Set up chunking (enforce max # sims per GPU per run)
n_sims = len(self.param_values)
chunksize_gpu = self.opts.get('chunksize', None)
if chunksize_gpu is None:
chunksize_gpu = n_sims
chunksize_total = chunksize_gpu * len(self.gpu)
tout = None
trajectories = None
chunks = np.array_split(range(n_sims),
np.ceil(n_sims / chunksize_total))
try:
for chunk_idx, chunk in enumerate(chunks):
self._logger.debug('cupSODA chunk {} of {}'.format(
(chunk_idx + 1), len(chunks)))
# Split chunk equally between GPUs
sims = dict(zip(self.gpu, np.array_split(chunk,
len(self.gpu))))
tout, trajectories = self._run_chunk(
self.gpu, outdir, chunk_idx, cmtx, sims,
trajectories, tout)
finally:
if self._cleanup:
shutil.rmtree(outdir)
end_time = time.time()
self._logger.info("cupSODA + I/O time: {} seconds".format(
end_time - start_time))
return SimulationResult(self, tout, trajectories)
@property
def _memory_usage(self):
try:
return self._memory_options[self.opts['memory_usage']]
except KeyError:
raise Exception('memory_usage must be one of %s'
% list(self._memory_options.keys()))
@property
def vol(self):
vol = self.opts.get('vol', None)
return vol
@vol.setter
def vol(self, volume):
self.opts['vol'] = volume
@property
def n_blocks(self):
n_blocks = self.opts.get('n_blocks')
if n_blocks is None:
default_threads_per_block = 32
bytes_per_float = 4
memory_per_thread = (self._len_species + 1) * bytes_per_float
if cuda is None:
threads_per_block = default_threads_per_block
else:
cuda.init()
device = cuda.Device(self.gpu[0])
attrs = device.get_attributes()
shared_memory_per_block = attrs[
cuda.device_attribute.MAX_SHARED_MEMORY_PER_BLOCK]
upper_limit_threads_per_block = attrs[
cuda.device_attribute.MAX_THREADS_PER_BLOCK]
max_threads_per_block = min(
shared_memory_per_block / memory_per_thread,
upper_limit_threads_per_block)
threads_per_block = min(max_threads_per_block,
default_threads_per_block)
n_blocks = int(
np.ceil(1. * len(self.param_values) / threads_per_block))
self._logger.debug('n_blocks set to {} (used pycuda: {})'.format(
n_blocks, cuda is not None
))
self.n_blocks = n_blocks
return n_blocks
@n_blocks.setter
def n_blocks(self, n_blocks):
if not isinstance(n_blocks, int):
raise ValueError("n_blocks must be an integer")
if n_blocks <= 0:
raise ValueError("n_blocks must be greater than 0")
self.opts['n_blocks'] = n_blocks
def _create_input_files(self, directory, sims, cmtx):
# atol_vector
with open(os.path.join(directory, "atol_vector"), 'w') as atol_vector:
for i in range(self._len_species):
atol_vector.write(str(self.opts.get('atol')))
if i < self._len_species - 1:
atol_vector.write("\n")
# c_matrix
with open(os.path.join(directory, "c_matrix"), 'w') as c_matrix:
for i in sims:
line = ""
for j in range(self._len_rxns):
if j > 0:
line += "\t"
line += str(cmtx[i][j])
c_matrix.write(line)
if i != sims[-1]:
c_matrix.write("\n")
# cs_vector
with open(os.path.join(directory, "cs_vector"), 'w') as cs_vector:
self._out_species = range(self._len_species) # species to output
if self._obs_species_only:
self._out_species = [False for sp in self._model.species]
for obs in self._model.observables:
for i in obs.species:
self._out_species[i] = True
self._out_species = [i for i in range(self._len_species) if
self._out_species[i] is True]
for i in range(len(self._out_species)):
if i > 0:
cs_vector.write("\n")
cs_vector.write(str(self._out_species[i]))
# left_side
with open(os.path.join(directory, "left_side"), 'w') as left_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
stoich = 0
for k in self._model.reactions[i]['reactants']:
if j == k:
stoich += 1
line += str(stoich)
if i < self._len_rxns - 1:
left_side.write(line + "\n")
else:
left_side.write(line)
# max_steps
with open(os.path.join(directory, "max_steps"), 'w') as mxsteps:
mxsteps.write(str(self.opts['max_steps']))
# model_kind
with open(os.path.join(directory, "modelkind"), 'w') as model_kind:
# always set modelkind to 'deterministic'
model_kind.write("deterministic")
# MX_0
with open(os.path.join(directory, "MX_0"), 'w') as MX_0:
mx0 = self.initials
# if a volume has been defined, rescale populations
# by N_A*vol to get concentration
# (NOTE: act on a copy of self.initials, not
# the original, which we don't want to modify)
if self.vol:
mx0 = mx0.copy()
mx0 /= (N_A * self.vol)
for i in sims:
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
line += str(mx0[i][j])
MX_0.write(line)
if i != sims[-1]:
MX_0.write("\n")
# right_side
with open(os.path.join(directory, "right_side"), 'w') as right_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
stochiometry = 0
for k in self._model.reactions[i]['products']:
if j == k:
stochiometry += 1
line += str(stochiometry)
if i < self._len_rxns - 1:
right_side.write(line + "\n")
else:
right_side.write(line)
# rtol
with open(os.path.join(directory, "rtol"), 'w') as rtol:
rtol.write(str(self.opts.get('rtol')))
# t_vector
with open(os.path.join(directory, "t_vector"), 'w') as t_vector:
for t in self.tspan:
t_vector.write(str(float(t)) + "\n")
# time_max
with open(os.path.join(directory, "time_max"), 'w') as time_max:
time_max.write(str(float(self.tspan[-1])))
def _get_cmatrix(self):
if self.model.tags:
raise ValueError('cupSODA does not currently support local '
'functions')
self._logger.debug("Constructing the c_matrix:")
c_matrix = np.zeros((len(self.param_values), self._len_rxns))
par_names = [p.name for p in self._model_parameters_rules]
rate_mask = np.array([p in self._model_parameters_rules for p in
self._model.parameters])
rate_args = []
par_vals = self.param_values[:, rate_mask]
rate_order = []
for rxn in self._model.reactions:
rate_args.append([arg for arg in rxn['rate'].atoms(sympy.Symbol) if
not arg.name.startswith('__s')])
reactants = len(rxn['reactants'])
rate_order.append(reactants)
output = 0.01 * len(par_vals)
output = int(output) if output > 1 else 1
for i in range(len(par_vals)):
if i % output == 0:
self._logger.debug(str(int(round(100. * i / len(par_vals)))) +
"%")
for j in range(self._len_rxns):
rate = 1.0
for r in rate_args[j]:
if isinstance(r, pysb.Parameter):
rate *= par_vals[i][par_names.index(r.name)]
elif isinstance(r, pysb.Expression):
raise ValueError('cupSODA does not currently support '
'models with Expressions')
else:
rate *= r
# volume correction
if self.vol:
rate *= (N_A * self.vol) ** (rate_order[j] - 1)
c_matrix[i][j] = rate
self._logger.debug("100%")
return c_matrix
def _load_trajectories(self, directory, sims):
"""Read simulation results from output files.
Returns `tout` and `trajectories` arrays.
"""
files = [filename for filename in os.listdir(directory) if
re.match(self._prefix, filename)]
if len(files) == 0:
raise SimulatorException(
"Cannot find any output files to load data from.")
if len(files) != len(sims):
raise SimulatorException(
"Number of output files (%d) does not match number "
"of requested simulations (%d)." % (
len(files), len(sims)))
n_sims = len(files)
trajectories = [None] * n_sims
tout = [None] * n_sims
traj_n = np.ones((len(self.tspan), self._len_species)) * float('nan')
tout_n = np.ones(len(self.tspan)) * float('nan')
# load the data
indir_prefix = os.path.join(directory, self._prefix)
for idx, n in enumerate(sims):
trajectories[idx] = traj_n.copy()
tout[idx] = tout_n.copy()
filename = indir_prefix + "_" + str(idx)
if not os.path.isfile(filename):
raise Exception("Cannot find input file " + filename)
# determine optimal loading method
if idx == 0:
(data, use_pandas) = self._test_pandas(filename)
# load data
else:
if use_pandas:
data = self._load_with_pandas(filename)
else:
data = self._load_with_openfile(filename)
# store data
tout[idx] = data[:, 0]
trajectories[idx][:, self._out_species] = data[:, 1:]
# volume correction
if self.vol:
trajectories[idx][:, self._out_species] *= (N_A * self.vol)
return np.array(tout), np.array(trajectories)
def _test_pandas(self, filename):
""" calculates the fastest method to load in data
Parameters
----------
filename : str
filename to load in
Returns
-------
np.array, bool
"""
# using open(filename,...)
start = time.time()
data = self._load_with_openfile(filename)
end = time.time()
load_time_openfile = end - start
# using pandas
if pd:
start = time.time()
self._load_with_pandas(filename)
end = time.time()
load_time_pandas = end - start
if load_time_pandas < load_time_openfile:
return data, True
return data, False
@staticmethod
def _load_with_pandas(filename):
data = pd.read_csv(filename, sep='\t', skiprows=None,
header=None).to_numpy()
return data
@staticmethod
def _load_with_openfile(filename):
with open(filename, 'r') as f:
data = [line.rstrip('\n').split() for line in f]
data = np.array(data, dtype=float, copy=False)
return data
def run_cupsoda(model, tspan, initials=None, param_values=None,
integrator='cupsoda', cleanup=True, verbose=False, **kwargs):
"""Wrapper method for running cupSODA simulations.
Parameters
----------
See ``CupSodaSimulator`` constructor.
Returns
-------
SimulationResult.all : list of record arrays
List of trajectory sets. The first dimension contains species,
observables and expressions (in that order)
"""
sim = CupSodaSimulator(model, tspan=tspan, integrator=integrator,
cleanup=cleanup, verbose=verbose, **kwargs)
simres = sim.run(initials=initials, param_values=param_values)
return simres.all
| bsd-2-clause |
rs2/pandas | pandas/core/arrays/sparse/scipy_sparse.py | 4 | 5379 | """
Interaction with scipy.sparse matrices.
Currently only includes to_coo helpers.
"""
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError("Is not a partition because intersection is not null.")
if set.union(*parts) != whole:
raise ValueError("Is not a partition because union is not the whole.")
def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
For arbitrary (MultiIndexed) sparse Series return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor.
"""
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the sparse Series: get the labels and data for non-null entries
values = ss.array._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
"""
Return dict of unique labels to number.
Optionally sort by label.
"""
labels = Index(map(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(labels)
return {k: i for i, k in enumerate(labels)}
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
else:
labels_to_i.index = Index(x[0] for x in labels_to_i.index)
labels_to_i.index.name = index.names[subset[0]]
labels_to_i.name = "value"
return labels_to_i
labels_to_i = _get_index_subset_to_coord_dict(
ss.index, levels, sort_labels=sort_labels
)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Convert a sparse Series to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
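Example (an illustrative sketch; exact reprs may vary across pandas and
scipy versions):

>>> import numpy as np
>>> import pandas as pd
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0],
...               index=pd.MultiIndex.from_tuples(
...                   [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]))
>>> ss = s.astype("Sparse")
>>> A, rows, columns = sparse_series_to_coo(
...     ss, row_levels=[0], column_levels=[1], sort_labels=True)
>>> A.toarray()
array([[3., 0.],
       [1., 3.]])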
"""
import scipy.sparse
if ss.index.nlevels < 2:
raise ValueError("to_coo requires MultiIndex with nlevels > 2")
if not ss.index.is_unique:
raise ValueError(
"Duplicate index entries are not allowed in to_coo transformation."
)
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(
ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns))
)
return sparse_matrix, rows, columns
def coo_to_sparse_series(A, dense_index: bool = False):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
Parameters
----------
A : scipy.sparse.coo.coo_matrix
dense_index : bool, default False
Returns
-------
Series
Raises
------
TypeError if A is not a coo_matrix
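Example (an illustrative sketch):

>>> import scipy.sparse
>>> A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
...                             shape=(3, 4))
>>> ser = coo_to_sparse_series(A)
>>> ser  # a sparse-valued Series indexed by (row, col)  # doctest: +SKIP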
"""
from pandas import SparseDtype
try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError as err:
raise TypeError(
f"Expected coo_matrix. Got {type(A).__name__} instead."
) from err
s = s.sort_index()
s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex(ind)
return s
| bsd-3-clause |
elkingtoncode/People-Networks | tests/consensus/runtests.py | 4 | 20659 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Augur consensus tests.
To run consensus, call the Serpent functions in this order:
interpolate
center
tokenize
covariance
loop max_components:
blank
loop max_iterations:
loadings
latent
deflate
score
reputation_delta
weighted_delta
select_scores
smooth
resolve
payout
Final results (event outcomes and updated reputation values) are returned
as fixed-point (base 2^64) values from the payout function.
"""
from __future__ import division
import os
import sys
import json
import getopt
from pprint import pprint
import numpy as np
import pandas as pd
try:
from colorama import Fore, Style, init
except ImportError:
pass
from ethereum import tester as t
from pyconsensus import Oracle
ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, "consensus")
np.set_printoptions(linewidth=225,
suppress=True,
formatter={"float": "{: 0.6f}".format})
pd.set_option("display.max_rows", 25)
pd.set_option("display.width", 1000)
pd.set_option('display.float_format', lambda x: '%.8f' % x)
# max_iterations: number of blocks required to complete PCA
verbose = False
max_iterations = 5
tolerance = 0.05
variance_threshold = 0.85
max_components = 5
init()
YES = 2.0
NO = 1.0
BAD = 1.5
NA = 0.0
def BR(string): # bright red
return "\033[1;31m" + str(string) + "\033[0m"
def BB(string): # bright blue
return Fore.BLUE + Style.BRIGHT + str(string) + Style.RESET_ALL
def BW(string): # bright white
return Fore.WHITE + Style.BRIGHT + str(string) + Style.RESET_ALL
def BG(string): # bright green
return Fore.GREEN + Style.BRIGHT + str(string) + Style.RESET_ALL
def BC(string): # bright cyan
return Fore.CYAN + Style.BRIGHT + str(string) + Style.RESET_ALL
def binary_input_example():
print BW("Testing with binary inputs")
print BW("==========================")
reports = np.array([[ YES, YES, NO, YES],
[ YES, NO, NO, NO],
[ YES, YES, NO, NO],
[ YES, YES, YES, NO],
[ YES, NO, YES, YES],
[ NO, NO, YES, YES]])
reputation = [2, 10, 4, 2, 7, 1]
scaled = [0, 0, 0, 0]
scaled_max = [YES, YES, YES, YES]
scaled_min = [NO, NO, NO, NO]
return (reports, reputation, scaled, scaled_max, scaled_min)
def scalar_input_example():
print BW("Testing with binary and scalar inputs")
print BW("=====================================")
reports = np.array([[ YES, YES, NO, NO, 233, 16027.59 ],
[ YES, NO, NO, NO, 199, 0. ],
[ YES, YES, NO, NO, 233, 16027.59 ],
[ YES, YES, YES, NO, 250, 0. ],
[ NO, NO, YES, YES, 435, 8001.00 ],
[ NO, NO, YES, YES, 435, 19999.00 ]])
reputation = [1, 1, 1, 1, 1, 1]
scaled = [0, 0, 0, 0, 1, 1]
scaled_max = [ YES, YES, YES, YES, 435, 20000 ]
scaled_min = [ NO, NO, NO, NO, 0, 8000 ]
return (reports, reputation, scaled, scaled_max, scaled_min)
def randomized_inputs(num_reports=10, num_events=5):
print BW("Testing with randomized inputs")
print BW("==============================")
reports = np.random.randint(-1, 2, (num_reports, num_events)).astype(float)
reputation = np.random.randint(1, 100, num_reports).tolist()
scaled = np.random.randint(0, 2, num_events).tolist()
scaled_max = np.ones(num_events)
scaled_min = -np.ones(num_events)
for i in range(num_events):
if scaled[i]:
scaled_max[i] = np.random.randint(1, 100)
scaled_min[i] = np.random.randint(0, scaled_max[i])
scaled_max = scaled_max.astype(int).tolist()
scaled_min = scaled_min.astype(int).tolist()
return (reports, reputation, scaled, scaled_max, scaled_min)
def fix(x):
return int(x * 0x10000000000000000)
def unfix(x):
return x / 0x10000000000000000
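# Illustrative example of the base-2^64 fixed-point encoding used throughout
# these tests (values below are exact):
# fix(1.5) == 0x18000000000000000 == 27670116110564327424
# unfix(fix(1.5)) == 1.5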
def fold(arr, num_cols):
folded = []
num_rows = len(arr) / float(num_cols)
if num_rows != int(num_rows):
raise Exception("array length (%i) not divisible by %i" % (len(arr), num_cols))
num_rows = int(num_rows)
for i in range(num_rows):
row = []
for j in range(num_cols):
row.append(arr[i*num_cols + j])
folded.append(row)
return folded
def display(arr, description=None, show_all=None, refold=False):
if description is not None:
print(BW(description))
if refold and type(refold) == int:
num_rows = len(arr) / float(refold)
if num_rows == int(num_rows) and len(arr) > refold:
print(np.array(fold(map(unfix, arr), refold)))
else:
refold = False
if not refold:
if show_all is not None:
print(pd.DataFrame({
'result': arr,
'base 16': map(hex, arr),
'base 2^64': map(unfix, arr),
}))
else:
print(json.dumps(map(unfix, arr), indent=3, sort_keys=True))
def rmsd(forecast, actual, fixed=True):
if fixed:
if len(forecast) > 1:
forecast = np.array(map(unfix, forecast))
else:
forecast = unfix(np.array(forecast).squeeze())
return np.sqrt(np.mean((actual - forecast)**2))
def tol(forecast, actual, fixed=True):
r = rmsd(forecast, actual, fixed=fixed)
try:
assert(r < tolerance)
except Exception as err:
print "Forecast:", np.array(map(unfix, forecast))
print "Actual:", actual
print "RMSD tolerance exceeded:", r, ">=", tolerance
raise
def init_chain(gas_limit=700000000):
print(BR("Creating new test chain ") + "(gas limit: %s)" % gas_limit)
s = t.state()
t.gas_limit = gas_limit
return t.state()
def compile_contract(state, filename, root=ROOT, gas=70000000):
print(BG(filename) + " (%s gas)" % gas)
return state.abi_contract(os.path.join(root, filename), gas=gas)
def profile(contract, fn, *args, **kwargs):
sys.stdout.write(BW(" - %s:" % fn))
sys.stdout.flush()
result = getattr(contract, fn)(*args, profiling=True)
print(" %i gas (%s seconds)" % (result['gas'], result['time']))
return result['output']
def test_consensus(example, verbose=False):
reports, reputation, scaled, scaled_max, scaled_min = example()
num_reports = len(reputation)
num_events = len(reports[0])
flatsize = num_reports * num_events
reputation_fixed = map(fix, reputation)
reports_fixed = map(fix, reports.ravel())
scaled_max_fixed = map(fix, scaled_max)
scaled_min_fixed = map(fix, scaled_min)
if verbose:
display(np.array(reports_fixed), "reports (raw):", refold=num_events, show_all=True)
s = init_chain()
c = compile_contract(s, "interpolate.se")
result = profile(c, "interpolate", reports_fixed,
reputation_fixed,
scaled,
scaled_max_fixed,
scaled_min_fixed)
result = np.array(result)
reports_filled = result[0:flatsize].tolist()
reports_mask = result[flatsize:].tolist()
if verbose:
display(reports_filled, "reports_filled:", refold=num_events, show_all=True)
c = compile_contract(s, "center.se")
result = profile(c, "center", reports_filled,
reputation_fixed,
scaled,
scaled_max_fixed,
scaled_min_fixed,
max_iterations,
max_components)
result = np.array(result)
weighted_centered_data = result[0:flatsize].tolist()
if verbose:
display(weighted_centered_data, "Weighted centered data:", refold=num_events, show_all=True)
lv = np.array(map(unfix, result[flatsize:-2]))
wcd = np.array(fold(map(unfix, weighted_centered_data), num_events))
wcd_init = wcd
rep = map(unfix, reputation_fixed)
R = np.diag(rep)
# Get "Satoshi" (integer) Reputation values
# Python
tokens = np.array([int(r * 1e6) for r in rep])
alltokens = np.sum(tokens)
# Serpent
reptokens = profile(c, "tokenize", reputation_fixed, num_reports, nparray=False)
if verbose:
print BR("Tokens:")
print BW(" Python: "), tokens
print BW(" Serpent:"), np.array(map(unfix, reptokens)).astype(int)
# Calculate the first row of the covariance matrix
# Python
covmat = wcd.T.dot(np.diag(tokens)).dot(wcd) / float(alltokens - 1)
totalvar = np.trace(covmat)
Crow = np.zeros(num_events)
wcd_x_tokens = wcd[:,0] * tokens
Crow = wcd_x_tokens.dot(wcd) / (alltokens-1)
# Serpent
covrow = profile(c, "covariance", weighted_centered_data,
reptokens,
num_reports,
num_events)
if verbose:
print BR("Covariance matrix row")
print BW(" Python: "), Crow
print BW(" Serpent:"), np.array(map(unfix, covrow))
tol(covrow, Crow)
#######
# PCA #
#######
# Python
iv = result[flatsize:]
variance_explained = 0
nc = np.zeros(num_reports)
negative = False
for j in range(min(max_components, num_events)):
# Calculate loading vector
lv = np.array(map(unfix, iv[:-2]))
for i in range(max_iterations):
lv = R.dot(wcd).dot(lv).dot(wcd)
lv /= np.sqrt(lv.dot(lv))
# Calculate the eigenvalue for this eigenvector
for k in range(num_events):
if lv[k] != 0:
break
E = covmat[k,:].dot(lv) / lv[k]
# Cumulative variance explained
variance_explained += E / totalvar
# Projection onto new axis: nonconformity vector
slv = lv
if slv[0] < 0:
slv *= -1
nc += E * wcd.dot(slv)
if verbose:
print BW(" Loadings %d:" % j), np.round(np.array(lv), 6)
print BW(" Latent %d: " % j), E, "(%s%% variance explained)" % np.round(variance_explained * 100, 3)
# Deflate the data matrix
wcd = wcd - wcd.dot(np.outer(lv, lv))
if verbose:
print BW(" Nonconformity: "), np.round(nc, 6)
# Serpent
loading_vector = result[flatsize:].tolist()
data = weighted_centered_data
scores = map(int, np.zeros(num_reports).tolist())
var_exp = 0
num_comps = 0
c = compile_contract(s, "score.se")
while True:
print(BC(" COMPONENT %s" % str(num_comps + 1)))
# Loading vector (eigenvector)
# - Second-to-last element: number of iterations remaining
# - Last element: number of components remaining
loading_vector = profile(c, "blank", loading_vector[-1],
max_iterations,
num_events)
sys.stdout.write(BW(" - loadings"))
sys.stdout.flush()
lv_gas = []
lv_time = []
while loading_vector[num_events] > 0:
sys.stdout.write(BW("."))
sys.stdout.flush()
result = c.loadings(loading_vector,
data,
reputation_fixed,
num_reports,
num_events,
profiling=True)
loading_vector = result['output']
lv_gas.append(result['gas'])
lv_time.append(result['time'])
print(" %i gas (%s seconds)" % (np.mean(lv_gas), np.mean(lv_time)))
# Latent factor (eigenvalue; check sign bit)
latent = profile(c, "latent", covrow, loading_vector, num_events)
# Deflate the data matrix
data = profile(c, "deflate", loading_vector, data, num_reports, num_events)
# Project data onto this component and add to weighted scores
scores = profile(c, "score", scores,
loading_vector,
weighted_centered_data,
latent,
num_reports,
num_events)
if verbose:
printable_loadings = np.array(map(unfix, loading_vector[:-2]))
if printable_loadings[0] < 0:
printable_loadings *= -1
print BW("Component %d [%s]:\t" %
(num_comps, np.round(unfix(latent), 4))), printable_loadings
num_comps += 1
if loading_vector[num_events + 1] == 0:
break
tol(scores, nc)
c = compile_contract(s, "adjust.se")
result = profile(c, "reputation_delta", scores, num_reports, num_events)
result = np.array(result)
set1 = result[0:num_reports].tolist()
set2 = result[num_reports:].tolist()
assert(len(set1) == len(set2))
assert(len(result) == 2*num_reports)
if verbose:
display(set1, "set1:", show_all=True)
display(set2, "set2:", show_all=True)
result = profile(c, "weighted_delta", set1,
set2,
reputation_fixed,
reports_filled,
num_reports,
num_events)
result = np.array(result)
old = result[0:num_events].tolist()
new1 = result[num_events:(2*num_events)].tolist()
new2 = result[(2*num_events):].tolist()
assert(len(result) == 3*num_events)
assert(len(old) == len(new1) == len(new2))
if verbose:
display(old, "old:", show_all=True)
display(new1, "new1:", show_all=True)
display(new2, "new2:", show_all=True)
adjusted_scores = profile(c, "select_scores", old,
new1,
new2,
set1,
set2,
scores,
num_reports,
num_events)
c = compile_contract(s, "resolve.se")
smooth_rep = profile(c, "smooth", adjusted_scores,
reputation_fixed,
num_reports,
num_events)
event_outcomes = profile(c, "resolve", smooth_rep,
reports_filled,
scaled,
scaled_max_fixed,
scaled_min_fixed,
num_reports,
num_events)
c = compile_contract(s, "payout.se")
reporter_payout = profile(c, "payout", event_outcomes,
smooth_rep,
reports_mask,
num_reports,
num_events)
reporter_payout = np.array(reporter_payout)
if verbose:
print BW("Nonconformity scores:"), np.array(map(unfix, scores))
print BW("Raw reputation: "), np.array(map(unfix, smooth_rep))
print BW("Adjusted scores: "), np.array(map(unfix, adjusted_scores))
print BW("Reporter payout: "), np.array(map(unfix, reporter_payout))
print BW("Event outcomes: "), np.array(map(unfix, event_outcomes))
# Compare to pyconsensus
print BG("pyconsensus")
event_bounds = []
for i, s in enumerate(scaled):
event_bounds.append({
'scaled': 0 if s == False else 1,
'min': scaled_min[i],
'max': scaled_max[i],
})
for j in range(num_events):
for i in range(num_reports):
if reports[i,j] == 0:
reports[i,j] = np.nan
pyresults = Oracle(reports=reports,
reputation=reputation,
event_bounds=event_bounds,
algorithm="big-five",
variance_threshold=variance_threshold,
max_components=max_components,
verbose=False).consensus()
serpent_results = {
'reputation': map(unfix, smooth_rep),
'outcomes': map(unfix, event_outcomes),
}
python_results = {
'reputation': pyresults['agents']['smooth_rep'],
'outcomes': np.array(pyresults['events']['outcomes_final']),
}
comparisons = {}
for m in ('reputation', 'outcomes'):
comparisons[m] = abs((python_results[m] - serpent_results[m]) / python_results[m])
fails = 0
for key, value in comparisons.items():
try:
assert((value < tolerance).all())
except Exception as e:
fails += 1
print BW("Tolerance exceeded for ") + BR("%s:" % key)
print "Serpent: ", np.array(serpent_results[key])
print "Python: ", python_results[key]
print "Difference: ", comparisons[key]
if fails == 0:
print BC("Tests passed!")
def test_redeem(example, verbose=False):
branch = 1
period = 1
reports, reputation, scaled, scaled_max, scaled_min = example()
num_reports = len(reputation)
num_events = len(reports[0])
flatsize = num_reports * num_events
reputation_fixed = map(fix, reputation)
reports_fixed = map(fix, reports.ravel())
scaled_max_fixed = map(fix, scaled_max)
scaled_min_fixed = map(fix, scaled_min)
mock = 0 if example.__name__ == "binary_input_example" else 1
s = init_chain(gas_limit=750000000)
c = compile_contract(s, "redeem_full.se", gas=700000000)
output = profile(c, "redeem", branch, period, num_events, num_reports, flatsize, mock)
print np.array(map(unfix, output))
display(output, "Refolded:", refold=num_events)
def test_dispatch(example, verbose=False):
branch = 1
period = 1
reports, reputation, scaled, scaled_max, scaled_min = example()
num_reports = len(reputation)
num_events = len(reports[0])
flatsize = num_reports * num_events
reputation_fixed = map(fix, reputation)
reports_fixed = map(fix, reports.ravel())
scaled_max_fixed = map(fix, scaled_max)
scaled_min_fixed = map(fix, scaled_min)
mock = 0 if example.__name__ == "binary_input_example" else 1
s = init_chain(gas_limit=750000000)
c = compile_contract(s, r"../function files/dispatch-tester.se", gas=700000000)
profile(c, "dispatch", branch, mock)
def runtests(verbose=False, full=False, redeem=False, dispatch=False):
examples = (
binary_input_example,
scalar_input_example,
# randomized_inputs,
)
for example in examples:
if full:
test_consensus(example, verbose=verbose)
test_redeem(example, verbose=verbose)
test_dispatch(example, verbose=verbose)
elif redeem:
test_redeem(example, verbose=verbose)
elif dispatch:
test_dispatch(example, verbose=verbose)
else:
test_consensus(example, verbose=verbose)
def main(argv=None):
if argv is None:
argv = sys.argv
try:
short_opts = 'hvfrd'
long_opts = ['help', 'verbose', 'full', 'redeem', 'dispatch']
opts, vals = getopt.getopt(argv[1:], short_opts, long_opts)
except getopt.GetoptError as e:
sys.stderr.write(e.msg)
sys.stderr.write("for help use --help")
return 2
parameters = {
'verbose': False,
'full': False,
'redeem': False,
'dispatch': False,
}
for opt, arg in opts:
if opt in ('-h', '--help'):
print(__doc__)
return 0
elif opt in ('-v', '--verbose'):
parameters['verbose'] = True
elif opt in ('-f', '--full'):
parameters['full'] = True
elif opt in ('-r', '--redeem'):
parameters['redeem'] = True
elif opt in ('-d', '--dispatch'):
parameters['dispatch'] = True
# Run tests
runtests(**parameters)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
flavour/cedarbluff | private/update_check/eden_update_check.py | 3 | 5998 | # -*- coding: utf-8 -*-
"""
Check whether the configuration is sufficient to run Eden.
"""
def update_check(environment):
# Get Web2py environment into our globals.
globals().update(**environment)
import os
app_path_parts = ["applications", request.application]
app_path = os.path.join(*app_path_parts)
# Fatal configuration errors.
errors = []
# Non-fatal warnings.
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
try:
import dateutil
except(ImportError):
errors.append("S3 unresolved dependency: dateutil required for Sahana to run")
try:
import lxml
except(ImportError):
errors.append("S3XML unresolved dependency: lxml required for Sahana to run")
try:
import shapely
except(ImportError):
warnings.append("S3GIS unresolved dependency: shapely required for GIS support")
try:
import xlrd
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlrd required for XLS export")
try:
import xlwt
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlwt required for XLS export")
try:
from PIL import Image
except(ImportError):
try:
import Image
except(ImportError):
warnings.append("S3PDF unresolved dependency: Python Imaging required for PDF export")
try:
import reportlab
except(ImportError):
warnings.append("S3PDF unresolved dependency: reportlab required for PDF export")
try:
import matplotlib
except(ImportError):
warnings.append("S3Chart unresolved dependency: matplotlib required for charting")
try:
import numpy
except(ImportError):
warnings.append("S3Cube unresolved dependency: numpy required for pivot table reports")
try:
import tweepy
except(ImportError):
warnings.append("S3Msg unresolved dependency: tweepy required for non-Tropo Twitter support")
try:
import PyRTF
except(ImportError):
warnings.append("Survey unresolved dependency: PyRTF required if you want to export assessment templates as a Word document")
# -------------------------------------------------------------------------
# Check Web2Py
# Currently, the minimum usable Web2py is determined by the existence of
# the global "current".
try:
from gluon import current
except ImportError:
errors.append(
"The installed version of Web2py is too old -- it does not define current."
"\nPlease upgrade Web2py to a more recent version.")
web2py_minimum_version = "Version 1.99.2 (2011-09-26 00:51:34) stable"
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
web2py_minimum_datetime = parse_version(web2py_minimum_version)[3]
web2py_installed_datetime = request.global_settings.web2py_version[3]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to provide the Scheduler,"
"\nso scheduled tasks will not be available. If you need scheduled tasks,"
"\nplease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Add required directories if needed
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
template_src = os.path.join(app_path, "deployment-templates")
template_dst = app_path
template_files = (
os.path.join("models", "000_config.py"),
# Deprecated by Scheduler
#"cron/crontab"
)
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_src, t)
dst_path = os.path.join(template_dst, t)
try:
os.stat(dst_path)
except OSError:
# not found, copy from template
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(t)
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# @ToDo: Check if it's up to date (i.e. a critical update requirement)
#version_pattern = r"VERSION_\w*\s*=\s*([0-9]+)"
#version_matcher = re.compile(version_pattern).match
#has_version = False
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# =============================================================================
| mit |
gciteam6/xgboost | src/models/predict_model.py | 1 | 5199 | # Built-in modules
from os import path, pardir
import sys
import logging
# not used in this stub but often useful for finding various files
PROJECT_ROOT_DIRPATH = path.join(path.dirname(__file__), pardir, pardir)
sys.path.append(PROJECT_ROOT_DIRPATH)
# Third-party modules
import click
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import bloscpack as bp
# Hand-made modules
from src.models.xgb import MyXGBRegressor
TRAIN_FILEPATH_PREFIX = path.join(PROJECT_ROOT_DIRPATH, "data/processed/dataset.train_X_y")
TEST_FILEPATH_PREFIX = path.join(PROJECT_ROOT_DIRPATH, "data/processed/dataset.test_X")
PREDICT_FILENAME_PREFIX = "predict"
PREDICT_FILENAME_EXTENSION = "tsv"
LOCATIONS = (
"ukishima",
"ougishima",
"yonekurayama"
)
KWARGS_READ_CSV = {
"sep": "\t",
"header": 0,
"parse_dates": [0],
"index_col": 0
}
KWARGS_TO_CSV = {
"sep": "\t"
}
def gen_params_dict(n_estimators, max_depth, learning_rate,
reg_lambda, reg_alpha,
subsample, colsample_bytree, seed):
return {"n_estimators": n_estimators,
"max_depth": max_depth,
"learning_rate": learning_rate,
"reg_lambda": reg_lambda,
"reg_alpha": reg_alpha,
"subsample": subsample,
"colsample_bytree": colsample_bytree,
"seed": seed}
def get_test_X(filepath_prefix, location, fold_id=None):
df = pd.read_csv('.'.join([filepath_prefix, location + ".tsv"]), **KWARGS_READ_CSV)
if isinstance(fold_id, int):
crossval_index_filename = '.'.join([filepath_prefix,
"index.crossval{f}".format(f=fold_id),
location + ".blp"])
extract_index = pd.DatetimeIndex(bp.unpack_ndarray_file(crossval_index_filename))
df = df.loc[extract_index, :]
return df.iloc[:, :-1].values, df.index
else:
return df.values, df.index
@click.command()
@click.option("-t", "predict_target", flag_value="test", default=True)
@click.option("-v", "predict_target", flag_value="crossval")
@click.option("--location", "-l", type=str, default=None)
@click.option("--fold-id", "-f", type=int)
@click.option("--n_estimators", type=int, default=1000)
@click.option("--max_depth", type=int, default=3)
@click.option("--learning_rate", type=float, default=0.1)
@click.option("--reg_lambda", type=float, default=1.0)
@click.option("--reg_alpha", type=float, default=0.0)
@click.option("--subsample", type=float, default=0.8)
@click.option("--colsample_bytree", type=float, default=0.8)
@click.option("--seed", type=int, default=0)
def main(location, predict_target, fold_id,
n_estimators, max_depth, learning_rate,
reg_lambda, reg_alpha,
subsample, colsample_bytree, seed):
logger = logging.getLogger(__name__)
logger.info('#0: run prediction ')
#
# predict by the serialized model
#
if location is None:
location_list = LOCATIONS
else:
location_list = [location, ]
XGB_PARAMS = gen_params_dict(n_estimators, max_depth, learning_rate,
reg_lambda, reg_alpha,
subsample, colsample_bytree, seed)
param_str = str()
for (key, value) in XGB_PARAMS.items():
param_str += "{k}_{v}.".format(k=key, v=value)
for place in location_list:
if predict_target == "test":
logger.info('#1: predict all training data by the model trained those @ {l} !'.format(l=place))
m = MyXGBRegressor(model_name=param_str + "test.{l}".format(l=place), params=XGB_PARAMS)
X_test, ret_index = get_test_X(TEST_FILEPATH_PREFIX, place, fold_id=None)
elif predict_target == "crossval":
if fold_id is None:
raise ValueError("Specify validation dataset number as an integer !")
logger.info('#1: predict test subset in cross-validation of fold-id: {f} @ {l} !'.format(f=fold_id, l=place))
m = MyXGBRegressor(model_name=param_str + "crossval{i}.{l}".format(i=fold_id, l=place), params=XGB_PARAMS)
X_test, ret_index = get_test_X(TRAIN_FILEPATH_PREFIX, place, fold_id=fold_id)
else:
raise ValueError("Invalid flag, '-t' or '-v' is permitted !")
logger.info('#1: get test dataset @ {l} !'.format(l=place))
logger.info('#2: now predicting...')
y_pred = m.predict(X_test)
pd.DataFrame(
y_pred, index=ret_index, columns=[param_str[:-1]]
).to_csv(
path.join(m.MODELS_SERIALIZING_BASEPATH,
'.'.join([PREDICT_FILENAME_PREFIX, m.model_name, PREDICT_FILENAME_EXTENSION])),
**KWARGS_TO_CSV
)
logger.info('#2: a prediction result is saved @ {l} !'.format(l=place))
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
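# Illustrative invocations (a sketch; option values below are examples only,
# not taken from the original project's documentation):
# python predict_model.py -t --location ukishima --n_estimators 500
# python predict_model.py -v --fold-id 0 --learning_rate 0.05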
| mit |
manewton/BioReactor-Data-Logging | Data_Management/Data_Management.py | 1 | 2851 | import pandas as pd
import datetime
def add_and_merge_inst2_data(filename):
"""
Takes R1Data file and merges another filename into it by time index.
Returns a .csv file titled R1Data with additional columns of data, all indexed by time.
"""
#R1 - first data file parse by data
R1 = pd.read_csv("R1Data")
R1date = pd.DatetimeIndex(R1["Date"])
R1_indexed = R1.set_index(R1date)
#R2 - second data file (input) is parsed by date
R2 = pd.read_csv(filename, encoding = "utf-16", skiprows=8, sep = '\t')
R2 = R2.ix[1:]
R2 = R2[R2["Sample/ctrl ID"].str.contains("R1")]
R2date = pd.DatetimeIndex(R2["Result time"])
R2_indexed = R2.set_index(R2date)
joined_data = R1_indexed.join(R2_indexed, how = "outer", rsuffix = "_y")
#I now learned that the rsuffix will append an _y to any repeating columns
#and there will be repeating columns when we try to add more and more data
#The correct fucntion would be to us append. which will simple append new data
#to the existing columns. But only had experimental file of each type so missed
#this error. Will try to fix once we succesful push this to google drive
#and effectively add, newly needed columns.
return joined_data.to_csv("R1Data", sep = ",", index_label=False)
def instrument3_input_v2():
"""
Takes R1Data and adds a new column from a user input prompt. Returns a csv
titled R1Data, which can then be pushed to Google Drive.
"""
#importing present R1Data file
R1 = pd.read_csv("R1Data")
R1date = pd.DatetimeIndex(R1["Date"])
R1_indexed = R1.set_index(R1date)
#creating dataframe for manually entered data
inst3_input = input("enter result from instrument 3: ")
time = datetime.datetime.now()
R3 = pd.DataFrame([[float(inst3_input),time]], columns = ['Inst_3_Value',"Date"])
#indexing created datafram by date
inst3_time = pd.DatetimeIndex(R3["Date"])
R3_indexed = R3.set_index(inst3_time)
#joning the data frames by index
joined_data = R1_indexed.join(R3_indexed, how = "outer", rsuffix = "_y")
joined_data.to_csv("R1Data", sep = ",", index_label="Date")
return joined_data.to_csv("R1Data", sep = ",", index_label="Date")
def instrument3_input():
"""
Updates existing .csv file titled instrument_3 with a user input and
overwrites the file with the added value. Designed to create a .csv file
with manually input parameters from instrument #3.
"""
inst3_df = pd.read_csv("instrument_3")
inst3_input = input("enter result from instrument 3: ")
time = datetime.datetime.now()
df2 = pd.DataFrame([[float(inst3_input),time]], columns = ['Value',"Date"])
inst3_df = inst3_df.append(df2, ignore_index=True)
inst3_df.to_csv("instrument_3", sep = ",", index_label=False)
return inst3_df
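# Illustrative usage (a sketch; the instrument-2 filename below is an example
# only, and the referenced files are assumed to exist in the working directory):
# add_and_merge_inst2_data("instrument_2_export.txt")
# instrument3_input_v2()
# instrument3_input()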
| gpl-3.0 |
liuguoyaolgy/Stock | m_JTZF.py | 1 | 15908 | # 酒田战法
import tushare as ts
from m_load_update_data import load
import m_draw
import matplotlib.pyplot as plt
import string
import m_smtp
import datetime
import talib as ta
from m_db import m_db2
import m_cw
import matplotlib.pyplot as plt
import time
class g():
a = ''
b = ''
c = []
#************
#1. MACD histogram red for three days, and the third bar is > 4/3 of the first
#2. First golden cross above the zero axis: buy
#3. Second golden cross above the zero axis: buy
#
#
#
#**************
# def data_complete():
# # back-fill daily historical data
# ld = load()
# # ld.get_stick_hisdata_d(begin_date='2014-01-01',end_date='2016-12-23')
# ld.get_stick_hisdata_d(begin_date='2016-12-01', end_date='2016-12-23')
def pre_data(stick_code,ktype='D',today=''):
# ktype in ('D','W','M')
#today='2010-01-01'
if '' == today:
today = datetime.date.today().strftime('%Y-%m-%d')
# begindate = datetime.date.today() - datetime.timedelta(days=13)
global df
db = m_db2()
try:
if ktype == 'D':
df = db.get_data("select * from t_stick_data_d where code = '"+stick_code+"' and date > '2015-09-01' and date <='"+today+"' order by date asc;")#and date>'2015-05-01'
elif ktype == 'W':
df = db.get_data("select * from t_stick_data_w where code = '"+stick_code+"' ;")#and date>'2015-05-01'
elif ktype == 'M':
df = db.get_data("select * from t_stick_data_m where code = '" + stick_code + "' ;") # and date>'2015-05-01'
except Exception as e:
#print('ERR:',e)
return
df['cci'] = ta.CCI(df['high'].values.astype('double'),df['low'].values.astype('double'),df['close'].values.astype('double'))
df['diff'],df['dea'],df['macd'] = ta.MACD(df['close'].values.astype('double'),fastperiod=12, slowperiod=26, signalperiod=9)
df['obv'] = ta.OBV(df['close'].values.astype('double'),df['vol'].values.astype('double'))
df['volma5']=ta.MA(df['vol'].values.astype('double'),5);
df['volma13'] = ta.MA(df['vol'].values.astype('double'), 13);
df['volma20'] = ta.MA(df['vol'].values.astype('double'), 20);
df['volma34'] = ta.MA(df['vol'].values.astype('double'), 34);
df['MA20'] = ta.MA(df['close'].values.astype('double'), 20)
df['MA60'] = ta.MA(df['close'].values.astype('double'), 60)
df['MA5'] = ta.MA(df['close'].values.astype('double'), 5)
df['MA13'] = ta.MA(df['close'].values.astype('double'), 13)
df['MA34'] = ta.MA(df['close'].values.astype('double'), 34)
df['MA89'] = ta.MA(df['close'].values.astype('double'), 89)
df['MA144'] = ta.MA(df['close'].values.astype('double'), 144)
df['cwbili']=0
df['pricebili']=0
return df
# draw
def run(code,today):
#601999
#600485
#601011
#code = '600706'
#print(code,today)
df = pre_data(code, ktype='D',today=today)
try:
dflen = len(df)-1
#print(dflen)
if dflen<10:
return
except Exception as e:
#print(e)
return
#print('end pre_Data')
# for icnt in range(30,len(df)):
#sale
# if 1==upGap2Times(df,icnt-1) \
# or 1==diffdown3days(df,icnt-1) \
# or 1==in20DhszshHasSaleFlag(df,icnt-1):
# m_cw.sale(code, float(df.loc[icnt-1]['close']), 1)
# print('S:',df.loc[icnt-1]['date'],m_cw.allamt())
#buy
#if 1==upGap2Times(df,dflen):
if 1==gapNotBeFillIn3days(df,dflen) and 1==downGap3Times(df,dflen):
# m_cw.buy(code, float(df.loc[icnt-1]['close']), 1)
print('B : ',today,code)
m_draw.drawDayWeek(code, today, 60, ktype='D')
# used for plotting
# df.loc[icnt-1,['cwbili']]=m_cw.allamt()/100000.0
# df.loc[icnt-1,['pricebili']]=float(df.loc[icnt-1]['close'])/float(df.loc[30]['close'])
return
# draw
def run2(code,today):
#601999
#600485
#601011
#code = '600706'
#print(code,today)
df = pre_data(code, ktype='D',today=today)
try:
dflen = len(df)-1
#print(dflen)
if dflen<10:
return
except Exception as e:
#print(e)
return
if 1==in5dHasMacdBuyFlag(df,dflen) and 1==vol_canbuyflag(df,dflen) and 1==price_canbuyflag(df,dflen) :
# and price_up16per(df,dflen) == 1:
# m_cw.buy(code, float(df.loc[icnt-1]['close']), 1)
print('B : ',today,code)
try:
db.insert_can_buy_code(today, code, '2')
db.commit()
except Exception as e:
print('ERR:', e)
if 1==red_up_throw3MAline(df,dflen):
print('B : ',today,code)
try:
db.insert_can_buy_code(today, code, '3')
db.commit()
except Exception as e:
print('ERR:', e)
if 1==F23_muZiXingTai(df,dflen):
print('B : ', today, code)
try:
db.insert_can_buy_code(today, code, '4')
db.commit()
except Exception as e:
print('ERR:', e)
#m_draw.drawDayWeek(code, today, 60, ktype='D')
#用于画图
# df.loc[icnt-1,['cwbili']]=m_cw.allamt()/100000.0
# df.loc[icnt-1,['pricebili']]=float(df.loc[icnt-1]['close'])/float(df.loc[30]['close'])
return
# One bullish candle crossing above three moving-average lines
def red_up_throw3MAline(df,daycnt):
today = daycnt
if float(df.loc[today]['close'])>float(df.loc[today]['MA5']) \
and float(df.loc[today]['close'])>float(df.loc[today]['MA13']) \
and float(df.loc[today]['close'])>float(df.loc[today]['MA34']) \
and float(df.loc[today]['close'])>float(df.loc[today]['MA89']) \
and float(df.loc[today]['open']) < float(df.loc[today]['MA5']) \
and float(df.loc[today]['open']) < float(df.loc[today]['MA13']) \
and float(df.loc[today]['open']) < float(df.loc[today]['MA34']) \
and float(df.loc[today]['open']) < float(df.loc[today]['MA89']) \
and float(df.loc[today]['close'])>1.08*float(df.loc[today]['open']):
return 1
return 0
# Within the last 5 days, two consecutive days with a rise of more than 16%
def price_up16per(df,daycnt):
today = daycnt
i_1DAgo = daycnt - 1
i_2DAgo = daycnt - 2
i_3DAgo = daycnt - 3
i_4DAgo = daycnt - 4
i_5DAgo = daycnt - 5
if float(df.loc[i_2DAgo]['close']) * 1.11 < float(df.loc[today]['close'] ) \
or float(df.loc[i_3DAgo]['close']) * 1.11 < float(df.loc[i_1DAgo]['close'] ) \
or float(df.loc[i_4DAgo]['close']) * 1.11 < float(df.loc[i_2DAgo]['close'] ) \
or float(df.loc[i_5DAgo]['close']) * 1.11 < float(df.loc[i_3DAgo]['close'] ) :
return 1
return 0
# A golden cross within the last 5 days
def in5dHasMacdBuyFlag(df,daycnt):
today = daycnt
yestoday = daycnt - 1
i_2DAgo = daycnt - 2
i_3DAgo = daycnt - 3
i_4DAgo = daycnt - 4
i_5DAgo = daycnt - 5
if daycnt < 30:
return 0
if df.loc[today]['MA5']>df.loc[today]['MA13'] and df.loc[yestoday]['MA5']<df.loc[yestoday]['MA13']:
return 1
if df.loc[yestoday]['MA5'] > df.loc[yestoday]['MA13'] and df.loc[i_2DAgo]['MA5'] < df.loc[i_2DAgo]['MA13']:
return 1
if df.loc[i_2DAgo]['MA5'] > df.loc[i_2DAgo]['MA13'] and df.loc[i_3DAgo]['MA5'] < df.loc[i_3DAgo]['MA13']:
return 1
if df.loc[i_3DAgo]['MA5'] > df.loc[i_3DAgo]['MA13'] and df.loc[i_4DAgo]['MA5'] < df.loc[i_4DAgo]['MA13']:
return 1
if df.loc[i_4DAgo]['MA5'] > df.loc[i_4DAgo]['MA13'] and df.loc[i_5DAgo]['MA5'] < df.loc[i_5DAgo]['MA13']:
return 1
return 0
def vol_canbuyflag(df,daycnt):
today = daycnt
if df.loc[today]['volma5']>1.8*df.loc[today]['volma34'] \
and df.loc[today]['volma13']>df.loc[today]['volma34'] :
return 1
return 0
def price_canbuyflag(df,daycnt):
today = daycnt
if df.loc[today]['MA5'] > df.loc[today]['MA13'] \
and df.loc[today]['MA13'] > df.loc[today]['MA34'] \
and df.loc[today]['MA5'] > df.loc[today]['MA89'] \
and df.loc[today]['MA5'] > df.loc[today]['MA144'] \
and float(df.loc[today]['close'])*0.9 < df.loc[today]['MA89'] \
and float(df.loc[today]['close'])*0.9 < df.loc[today]['MA144'] \
and float(df.loc[today]['open'])*0.99 > float(df.loc[today]['close']):
return 1
return 0
# A golden cross within the last 3 days
def in3dHasMacdBuyFlag(df,daycnt):
today = daycnt
yestoday = daycnt - 1
i_2DAgo = daycnt - 2
i_3DAgo = daycnt - 3
if daycnt < 30:
return 0
if df.loc[today]['diff']>df.loc[today]['dea'] and df.loc[yestoday]['diff']<df.loc[yestoday]['dea']:
return 1
if df.loc[yestoday]['diff'] > df.loc[yestoday]['dea'] and df.loc[i_2DAgo]['diff'] < df.loc[i_2DAgo]['dea']:
return 1
if df.loc[i_2DAgo]['diff'] > df.loc[i_2DAgo]['dea'] and df.loc[i_3DAgo]['diff'] < df.loc[i_3DAgo]['dea']:
return 1
return 0
# A death cross within the last 3 days
def in3dHasMacdSaleFlag(df,daycnt):
today = daycnt
yestoday = daycnt - 1
i_2DAgo = daycnt - 2
i_3DAgo = daycnt - 3
if daycnt < 30:
return 0
if df.loc[today]['diff']<df.loc[today]['dea'] and df.loc[yestoday]['diff']>df.loc[yestoday]['dea']:
return 1
if df.loc[yestoday]['diff'] < df.loc[yestoday]['dea'] and df.loc[i_2DAgo]['diff'] > df.loc[i_2DAgo]['dea']:
return 1
if df.loc[i_2DAgo]['diff'] < df.loc[i_2DAgo]['dea'] and df.loc[i_3DAgo]['diff'] > df.loc[i_3DAgo]['dea']:
return 1
return 0
# The market index hits a new 20-day low
def in20DhszshHasSaleFlag(df,daycnt):
if float(df.loc[daycnt]['close'])< \
float(df[daycnt-20:daycnt]['close'].values.astype('double').min()):
return 1
return 0
# diff rises for three consecutive days
def diffup3days(df,daycnt):
if df.loc[daycnt]['diff'] > df.loc[daycnt-1]['diff'] and df.loc[daycnt-1]['diff']>df.loc[daycnt-2]['diff'] \
and df.loc[daycnt]['macd'] > 4 * df.loc[daycnt - 2]['macd'] / 3 and df.loc[daycnt - 2]['macd']>0:
return 1
return 0
#diff has fallen for three consecutive days
def diffdown3days(df,daycnt):
if df.loc[daycnt]['diff'] < df.loc[daycnt-1]['diff'] \
and df.loc[daycnt-1]['diff']<df.loc[daycnt-2]['diff']:
# and df.loc[daycnt]['diff']>4*df.loc[daycnt-2]['diff']/3:
return 1
return 0
#The 60-day MA turns upward (note: the condition below fires when MA60 has declined for two consecutive days)
def ma60up(df,daycnt):
if df.loc[daycnt]['MA60'] < df.loc[daycnt - 1]['MA60'] \
and df.loc[daycnt - 1]['MA60'] < df.loc[daycnt - 2]['MA60']:
return 1
return 0
#Three downward gaps within the last two months
#From the weekly chart, judge whether (high-low)/high > 50% over the past year and split into three tiers (high / middle / low), or judge via PE
def dangWei(df,daycnt):
return
#Three downward gaps; a month of continuous decline (5-day MA < 10-day MA) with a drop of more than 40%
def downGap3Times(df,daycnt):
if daycnt < 30 :
return 0
gapCnt = 0
for icnt in range(0,30):
if df.loc[daycnt-30+icnt]['low']>df.loc[daycnt-30+1+icnt]['high']:
gapCnt +=1
if gapCnt>=3:
return 1
return 0
#Continuous decline
#Drop of more than 40% over the last two months
def down40percent(df,daycnt):
if df[daycnt - 30:daycnt]['low'].values.astype('double').min() \
< 0.6 * df[daycnt - 30:daycnt]['high'].values.astype('double').max():
return 1
return 0
#Double-needle bottom (two long lower shadows)
#Two consecutive upward gaps: buy
def upGap2Times(df,daycnt):
if df.loc[daycnt]['low'] > df.loc[daycnt - 1]['high'] \
and df.loc[daycnt - 1]['low'] > df.loc[daycnt - 2]['high']\
and float(df.loc[daycnt - 2]['low']) > df[daycnt - 23:daycnt - 3]['high'].values.astype('double').max()\
and df[daycnt - 23:daycnt - 3]['low'].values.astype('double').min()>0.8*df[daycnt - 23:daycnt - 3]['high'].values.astype('double').max():
return 1
return 0
#Gap not filled within three days: buy
def gapNotBeFillIn3days(df,daycnt):
if df[daycnt-2:daycnt+1]['low'].values.astype('double').min() > float(df.loc[daycnt-3]['high'])\
and float(df.loc[daycnt]['close'])<1.04*float(df.loc[daycnt-3]['high'])\
and df[daycnt-1:daycnt+1]['high'].values.astype('double').max()<float(df.loc[daycnt-2]['close'])\
and float(df.loc[daycnt-3]['high'])>1.06*float(df.loc[daycnt-3]['low']) :
#print('min:',df[daycnt-2:daycnt]['low'].values.astype('double').min(),'high:',df.loc[daycnt-3]['high'])
return 1
return 0
#Method 6: escapes consolidation, gaps down with three consecutive candles: buy
#Method 7: downward cutting line: buy
def F7_xiaQieXian(df,daycnt):
if df[daycnt-2:daycnt+1]['low'].values.astype('double').min() > float(df.loc[daycnt-3]['high'])\
and float(df.loc[daycnt]['close'])<1.04*float(df.loc[daycnt-3]['high'])\
and df[daycnt-1:daycnt+1]['high'].values.astype('double').max()<float(df.loc[daycnt-2]['close'])\
and float(df.loc[daycnt-3]['high'])>1.06*float(df.loc[daycnt-3]['low']) :
#print('min:',df[daycnt-2:daycnt]['low'].values.astype('double').min(),'high:',df.loc[daycnt-3]['high'])
return 1
    return 0
#Method 10: embracing (engulfing) line: buy
def F10_huaiBaoXian(df,daycnt):
return
#Method 16: consecutive star lines jumping upward: buy
#Method 19: reverse drag line: buy
#Method 21: lower block line (golden needle probing the bottom); about a month of decline, with enlarged volume that day
def F21_xiaZuXian(df,daycnt):
return
#Method 23: harami pattern / outside doji star, volume expands, confirmed the next day (opens higher, closes up, volume expands)
def F23_muZiXingTai(df,daycnt):
today = daycnt
yestoday = daycnt - 1
i_2DAgo = daycnt - 2
i_3DAgo = daycnt - 3
if daycnt < 30:
return 0
if df.loc[today]['open']<df.loc[today]['close'] \
and float(df.loc[yestoday]['open'])>1.05*float(df.loc[yestoday]['close'])\
and float(df.loc[yestoday]['open'])>float(df.loc[today]['close'])\
and float(df.loc[yestoday]['close'])<float(df.loc[today]['open'])\
and float(df.loc[today]['vol'])>1.5*float(df.loc[yestoday]['vol']):
return 1
return 0
#Method 41: three red soldiers: buy
#Method 43: three black candles in a row, next day opens high and closes up: buy
#Method 53: counter-attack line: buy
#Method 54: buy when price pulls back then rises again; the three red soldiers must be strong
#Method 59: lower doji cross: buy
def F59_xiaShiZi(df,daycnt):
return
#Method 62: U-shaped line: buy
#Method 63: single towering pillar: buy
def F63_qingTianYiZhu(df,daycnt):
return
#Method 65: pot-bottom (rounded bottom): buy
#W-shaped bottom
def W_bottom(df,daycnt):
return
#Backtest: run the checks for every stock on each day
def huiCeMoniDay():
    #backtest the recent period (the loop below covers the last 30 days)
for cnt in range(1,30):
currentday = datetime.date.today() - datetime.timedelta(days=30-cnt)
strcurrentday = currentday.strftime('%Y-%m-%d')
print(time.asctime( time.localtime(time.time()) ),'currentday:',currentday)
coderslt = db.getXiaoShiZhiStock()
for code in coderslt['code']:
# run(code,strcurrentday)
run2(code,strcurrentday)
return
#Backtest all stocks over a specified time range (not adopted)
def huice():
return
def runtoday():
currentday = datetime.date.today()
strcurrentday = currentday.strftime('%Y-%m-%d')
print(time.asctime(time.localtime(time.time())), 'currentday:', currentday)
coderslt = db.getXiaoShiZhiStock()
for code in coderslt['code']:
# run(code,strcurrentday)
run2(code, strcurrentday)
return
################################
db = m_db2()
ld = load()
def m_run():
    #fill in missing historical data (daily bars)
#ld.data_complete(beginday='2015-06-01',endday='2017-02-14',ktype='D')
enddate = datetime.date.today()
begindate = datetime.date.today() - datetime.timedelta(days=7)
ld.data_complete(beginday=begindate.strftime('%Y-%m-%d'),endday=enddate.strftime('%Y-%m-%d'),ktype='D')
#m_draw.drawDayWeek('000672','2016-12-30',10,ktype='D')
#huiCeMoniDay()
runtoday()
    #get high-quality small-cap stocks
#df = db.getlittlestock('2016-12-13')
    #buy/sell operations
# run()
#
# m_cw.cw_print()
# plt.plot(df.index,df['cwbili'])
# plt.plot(df.index,df['pricebili'])
# plt.show()
    #print positions
# code = '603800'
# df = pre_data(code, ktype='D')
# print(df[0:3]['close'])
# print(df.loc[3]['close'])
# print(df[0:3]['close'].values.astype('double').min())
# print(float(df[0:3]['close'].values.astype('double').min()) > float(df.loc[3]['close']))
| gpl-2.0 |
CognitiveRobotics/rpg_svo | svo_analysis/src/svo_analysis/filter_groundtruth_smooth.py | 17 | 1875 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import transformations
from scipy import signal
save = True
data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth.txt'
filtered_data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth_filtered.txt'
file = open(data_filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
n = np.shape(D)[0]
rpy = np.empty([n,3])
for i in range(n):
quat = D[i,4:8]
rpy[i,:] = transformations.euler_from_quaternion(quat, axes='sxyz')
# filter rpy
f_sensor = 200.0; # sampling frequency in hz
f_cut = 15.0; # cutoff frequency in hz
b,a = signal.butter(5,f_cut/(f_sensor/2));
print(a)
print(b)
rpy_filt = np.empty([n,3])
rpy_filt[:,0] = signal.filtfilt(b, a, rpy[:,0])
rpy_filt[:,1] = signal.filtfilt(b, a, rpy[:,1])
rpy_filt[:,2] = signal.filtfilt(b, a, rpy[:,2])
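# Sketch (assumption, not used by the script below): the same zero-phase
# low-pass smoothing expressed with second-order sections, which tends to be
# numerically more robust for higher filter orders.
def smooth_rpy_sos(rpy_in, f_cut_hz=15.0, f_sample_hz=200.0, order=5):
    sos = signal.butter(order, f_cut_hz / (f_sample_hz / 2.0), output='sos')
    out = np.empty_like(rpy_in)
    for k in range(rpy_in.shape[1]):
        out[:, k] = signal.sosfiltfilt(sos, rpy_in[:, k])
    return out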
fig = plt.figure()
ax = fig.add_subplot(111, title='orientation filtered')
ax.plot(rpy[:,0], 'r-')
ax.plot(rpy[:,1], 'g-')
ax.plot(rpy[:,2], 'b-')
ax.plot(rpy_filt[:,0], 'k-', linewidth=2)
ax.plot(rpy_filt[:,1], 'k-', linewidth=2)
ax.plot(rpy_filt[:,2], 'k-', linewidth=2)
fig = plt.figure()
ax = fig.add_subplot(111, title='position')
ax.plot(D[:,1], 'r')
ax.plot(D[:,2], 'g')
ax.plot(D[:,3], 'b')
fig = plt.figure()
ax = fig.add_subplot(111, title='trajectory from top')
ax.plot(D[:,1], D[:,2])
if save:
f = open(filtered_data_filename,'w')
for i in range(np.shape(D)[0]):
quat = transformations.quaternion_from_euler(rpy_filt[i,0], rpy_filt[i,1], rpy_filt[i,2], axes='sxyz')
f.write('%.7f %.5f %.5f %.5f %.5f %.5f %.5f %.5f\n' % (D[i,0], D[i,1], D[i,2], D[i,3], quat[0], quat[1], quat[2], quat[3]))
f.close()
| gpl-3.0 |
imito/odin | odin/visual/figures.py | 1 | 75844 | # -*- coding: utf-8 -*-
# ===========================================================================
# The waveform and spectrogram plot adapted from:
# [librosa](https://github.com/bmcfee/librosa)
# Copyright (c) 2016, librosa development team.
# Modified work Copyright 2016-2017 TrungNT
# ===========================================================================
from __future__ import print_function, absolute_import, division
import os
import sys
import copy
import warnings
import colorsys
import itertools
from numbers import Number
from six import string_types
from six.moves import zip, range
from contextlib import contextmanager
from collections import Mapping, OrderedDict, defaultdict
import numpy as np
from scipy import stats
from odin.visual.stats_plot import *
# try:
# import seaborn # import seaborn for pretty plot
# except:
# pass
line_styles = ['-', '--', '-.', ':']
# this is shuffled by hand to make sure everything ordered
# in the most intuitive way
marker_styles = [".", "_", "|", "2", "s", "P", "+", "x", "^", "*", "h", "p", "d",
"v", "H", "<", "8", ">", "X",
"1", "3", "4", "D", "o"]
def get_all_named_colors(to_hsv=False):
from matplotlib import colors as mcolors
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
if to_hsv:
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
colors = OrderedDict([(name, color) for color, name in by_hsv])
return colors
def generate_random_colors(n, seed=1234, lightness_value=None,
return_hsl=False, return_hex=True):
if seed is not None:
rand = np.random.RandomState(seed)
n = int(n)
colors = []
# we want maximizing the differences in hue
all_hue = np.linspace(0., 0.88, num=n)
for i, hue in enumerate(all_hue):
saturation = 0.6 + rand.rand(1)[0] / 2.5 # saturation
if lightness_value is None:
lightness = 0.25 + rand.rand(1)[0] / 1.4 # lightness
else:
lightness = float(lightness_value)
# select color scheme to return
if return_hsl:
colors.append((hue, saturation, lightness))
else:
rgb = colorsys.hls_to_rgb(hue, lightness, saturation)
colors.append(rgb if not return_hex else
"#{:02x}{:02x}{:02x}".format(int(rgb[0] * 255),
int(rgb[1] * 255),
int(rgb[2] * 255)))
return colors
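# Usage sketch (illustration only, not referenced anywhere in this module):
# draw one distinct hex color per class name and reuse the mapping across plots.
def _example_color_per_class(class_names, seed=5218):
  colors = generate_random_colors(len(class_names), seed=seed)
  return dict(zip(class_names, colors))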
def generate_random_colormaps(n, seed=1234, bicolors=False):
from matplotlib.colors import LinearSegmentedColormap
color_maps = []
interpolate_hsl = lambda h, s, l: \
[(h, l + 0.49, s),
(h, l, s),
(h, l - 0.1, min(s + 0.1, 1.))]
if bicolors:
base_colors = generate_random_colors(n * 2, lightness_value=0.5, seed=seed,
return_hsl=True)
base_colors = list(zip(base_colors[:n], base_colors[n:]))
else:
base_colors = generate_random_colors(n, lightness_value=0.5, seed=seed,
return_hsl=True)
for i, c in enumerate(base_colors):
if bicolors:
cA, cB = c
colors = [colorsys.hls_to_rgb(*i)
for i in interpolate_hsl(*cB)[::-1] + interpolate_hsl(*cA)]
else:
hue, saturation, lightness = c
colors = [colorsys.hls_to_rgb(*i)
for i in interpolate_hsl(*c)]
color_maps.append(LinearSegmentedColormap.from_list(
name='Colormap%d' % i, colors=colors, N=256, gamma=1))
return color_maps
def generate_random_marker(n, seed=1234):
if n > len(marker_styles):
raise ValueError("There are %d different marker styles, but need %d" %
(len(marker_styles), n))
return marker_styles[:n]
# return np.random.choice(marker_styles, size=n, replace=False)
def to_axis(ax, is_3D=False):
""" Convert: int, tuple, None, Axes object
to proper matplotlib Axes (2D and 3D)
"""
from matplotlib import pyplot as plt
# 3D plot
if is_3D:
from mpl_toolkits.mplot3d import Axes3D
if ax is not None:
assert isinstance(ax, (Axes3D, Number, tuple, list)), \
'Axes3D must be used for 3D plot (z is given)'
if isinstance(ax, Number):
ax = plt.gcf().add_subplot(ax, projection='3d')
elif isinstance(ax, (tuple, list)):
ax = plt.gcf().add_subplot(*ax, projection='3d')
else:
ax = Axes3D(fig=plt.gcf())
# 2D plot
else:
if isinstance(ax, Number):
ax = plt.gcf().add_subplot(ax)
elif isinstance(ax, (tuple, list)):
ax = plt.gcf().add_subplot(*ax)
elif ax is None:
ax = plt.gca()
return ax
def _check_arg_length(dat, n, dtype, default, converter):
""" Shortcut for validating sequence of uniform data type """
if dat is None:
dat = [default] * n
elif isinstance(dat, dtype):
dat = [dat] * n
else:
assert len(dat) == n
dat = [converter(d) for d in dat]
return dat
# ===========================================================================
# Helper for spectrogram
# ===========================================================================
def time_ticks(locs, *args, **kwargs): # pylint: disable=star-args
'''Plot time-formatted axis ticks.
Parameters
----------
locations : list or np.ndarray
Time-stamps for tick marks
n_ticks : int > 0 or None
Show this number of ticks (evenly spaced).
If none, all ticks are displayed.
Default: 5
axis : 'x' or 'y'
Which axis should the ticks be plotted on?
Default: 'x'
time_fmt : None or {'ms', 's', 'm', 'h'}
- 'ms': milliseconds (eg, 241ms)
- 's': seconds (eg, 1.43s)
- 'm': minutes (eg, 1:02)
- 'h': hours (eg, 1:02:03)
If none, formatted is automatically selected by the
range of the times data.
Default: None
fmt : str
.. warning:: This parameter name was in librosa 0.4.2
Use the `time_fmt` parameter instead.
The `fmt` parameter will be removed in librosa 0.5.0.
kwargs : additional keyword arguments.
See `matplotlib.pyplot.xticks` or `yticks` for details.
Returns
-------
locs
labels
Locations and labels of tick marks
See Also
--------
matplotlib.pyplot.xticks
matplotlib.pyplot.yticks
Examples
--------
>>> # Tick at pre-computed beat times
>>> librosa.display.specshow(S)
>>> librosa.display.time_ticks(beat_times)
>>> # Set the locations of the time stamps
>>> librosa.display.time_ticks(locations, timestamps)
>>> # Format in seconds
>>> librosa.display.time_ticks(beat_times, time_fmt='s')
>>> # Tick along the y axis
>>> librosa.display.time_ticks(beat_times, axis='y')
'''
from matplotlib import pyplot as plt
n_ticks = kwargs.pop('n_ticks', 5)
axis = kwargs.pop('axis', 'x')
time_fmt = kwargs.pop('time_fmt', None)
if axis == 'x':
ticker = plt.xticks
elif axis == 'y':
ticker = plt.yticks
else:
raise ValueError("axis must be either 'x' or 'y'.")
if len(args) > 0:
times = args[0]
else:
times = locs
locs = np.arange(len(times))
if n_ticks is not None:
# Slice the locations and labels evenly between 0 and the last point
positions = np.linspace(0, len(locs) - 1, n_ticks,
endpoint=True).astype(int)
locs = locs[positions]
times = times[positions]
# Format the labels by time
formats = {'ms': lambda t: '{:d}ms'.format(int(1e3 * t)),
's': '{:0.2f}s'.format,
'm': lambda t: '{:d}:{:02d}'.format(int(t / 6e1),
int(np.mod(t, 6e1))),
'h': lambda t: '{:d}:{:02d}:{:02d}'.format(int(t / 3.6e3),
int(np.mod(t / 6e1,
6e1)),
int(np.mod(t, 6e1)))}
if time_fmt is None:
if max(times) > 3.6e3:
time_fmt = 'h'
elif max(times) > 6e1:
time_fmt = 'm'
elif max(times) > 1.0:
time_fmt = 's'
else:
time_fmt = 'ms'
elif time_fmt not in formats:
raise ValueError('Invalid format: {:s}'.format(time_fmt))
times = [formats[time_fmt](t) for t in times]
return ticker(locs, times, **kwargs)
def _cmap(data):
'''Get a default colormap from the given data.
If the data is boolean, use a black and white colormap.
If the data has both positive and negative values,
use a diverging colormap ('coolwarm').
Otherwise, use a sequential map: either cubehelix or 'OrRd'.
Parameters
----------
data : np.ndarray
Input data
Returns
-------
cmap : matplotlib.colors.Colormap
- If `data` has dtype=boolean, `cmap` is 'gray_r'
- If `data` has only positive or only negative values,
`cmap` is 'OrRd' (`use_sns==False`) or cubehelix
- If `data` has both positive and negatives, `cmap` is 'coolwarm'
See Also
--------
matplotlib.pyplot.colormaps
seaborn.cubehelix_palette
'''
import matplotlib as mpl
from matplotlib import pyplot as plt
_HAS_SEABORN = False
try:
_matplotlibrc = copy.deepcopy(mpl.rcParams)
import seaborn as sns
_HAS_SEABORN = True
mpl.rcParams.update(**_matplotlibrc)
except ImportError:
pass
data = np.atleast_1d(data)
if data.dtype == 'bool':
return plt.get_cmap('gray_r')
data = data[np.isfinite(data)]
robust = True
if robust:
min_p, max_p = 2, 98
else:
min_p, max_p = 0, 100
max_val = np.percentile(data, max_p)
min_val = np.percentile(data, min_p)
if min_val >= 0 or max_val <= 0:
if _HAS_SEABORN:
return sns.cubehelix_palette(light=1.0, as_cmap=True)
else:
return plt.get_cmap('OrRd')
return plt.get_cmap('coolwarm')
# ===========================================================================
# Helpers
# From DeepLearningTutorials: http://deeplearning.net
# ===========================================================================
def resize_images(x, shape):
from scipy.misc import imresize
reszie_func = lambda x, shape: imresize(x, shape, interp='bilinear')
if x.ndim == 4:
def reszie_func(x, shape):
# x: 3D
# The color channel is the first dimension
tmp = []
for i in x:
tmp.append(imresize(i, shape).reshape((-1,) + shape))
return np.swapaxes(np.vstack(tmp).T, 0, 1)
imgs = []
for i in x:
imgs.append(reszie_func(i, shape))
return imgs
def tile_raster_images(X, tile_shape=None, tile_spacing=(2, 2), spacing_value=0.):
  ''' This function creates a tile of images
Parameters
----------
X : 3D-gray or 4D-color images
for color images, the color channel must be the second dimension
tile_shape : tuple
resized shape of images
tile_spacing : tuple
    space between rows and columns of images
spacing_value : int, float
value used for spacing
'''
if X.ndim == 3:
img_shape = X.shape[1:]
elif X.ndim == 4:
img_shape = X.shape[2:]
else:
    raise ValueError('Unsupported %d-dimension images' % X.ndim)
if tile_shape is None:
tile_shape = img_shape
if tile_spacing is None:
tile_spacing = (2, 2)
if img_shape != tile_shape:
X = resize_images(X, tile_shape)
else:
X = [np.swapaxes(x.T, 0, 1) for x in X]
n = len(X)
n = int(np.ceil(np.sqrt(n)))
# create spacing
rows_spacing = np.zeros_like(X[0])[:tile_spacing[0], :] + spacing_value
nothing = np.vstack((np.zeros_like(X[0]), rows_spacing))
cols_spacing = np.zeros_like(nothing)[:, :tile_spacing[1]] + spacing_value
# ====== Append columns ====== #
rows = []
for i in range(n): # each rows
r = []
for j in range(n): # all columns
idx = i * n + j
if idx < len(X):
r.append(np.vstack((X[i * n + j], rows_spacing)))
else:
r.append(nothing)
if j != n - 1: # cols spacing
r.append(cols_spacing)
rows.append(np.hstack(r))
# ====== Append rows ====== #
img = np.vstack(rows)[:-tile_spacing[0]]
return img
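# Minimal sketch (illustration only): tile a batch of random gray images into
# a single 2-D array that can be shown with `plt.imshow`.
def _example_tile_random_images(n_images=16, height=28, width=28):
  images = np.random.rand(n_images, height, width)
  return tile_raster_images(images, tile_shape=(height, width),
                            tile_spacing=(2, 2))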
# ===========================================================================
# Plotting methods
# ===========================================================================
@contextmanager
def figure(nrow=8, ncol=8, dpi=180, show=False, tight_layout=True, title=''):
from matplotlib import pyplot as plt
inches_for_box = 2.4
  if nrow != ncol:
    # swap the two dimensions before scaling; tuple assignment keeps both
    # values based on the incoming nrow/ncol
    nrow, ncol = inches_for_box * ncol, inches_for_box * nrow
  else:
    nrow = inches_for_box * nrow
    ncol = inches_for_box * ncol
nrow += 1.2 # for the title
fig = plt.figure(figsize=(ncol, nrow), dpi=dpi)
yield fig
plt.suptitle(title)
if show:
plot_show(block=True, tight_layout=tight_layout)
def merge_figures(nrow, ncol):
pass
def fig2data(fig):
"""w, h, 4"""
fig.canvas.draw()
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
buf = np.roll(buf, 3, axis=2)
return buf
def data2fig(data):
from matplotlib import pyplot as plt
fig = plt.figure()
plt.imshow(data)
return fig
def plot_figure(nrow=8, ncol=8, dpi=180):
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(ncol, nrow), dpi=dpi)
return fig
def plot_title(title, fontsize=12):
from matplotlib import pyplot as plt
plt.suptitle(str(title), fontsize=fontsize)
def subplot(*arg, **kwargs):
from matplotlib import pyplot as plt
subplot = plt.subplot(*arg)
if 'title' in kwargs:
subplot.set_title(kwargs['title'])
return subplot
def plot_frame(ax=None, left=None, right=None, top=None, bottom=None):
""" Turn on, off the frame (i.e. the bounding box of an axis) """
ax = to_axis(ax)
if top is not None:
ax.spines['top'].set_visible(bool(top))
if right is not None:
ax.spines['right'].set_visible(bool(right))
if bottom is not None:
ax.spines['bottom'].set_visible(bool(bottom))
if left is not None:
ax.spines['left'].set_visible(bool(left))
return ax
def plot_aspect(aspect=None, adjustable=None, ax=None):
"""
aspect : {'auto', 'equal'} or num
'auto' automatic; fill the position rectangle with data
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height is num times
the width. aspect=1 is the same as aspect='equal'.
adjustable : None or {'box', 'datalim'}, optional
If not None, this defines which parameter will be adjusted to
meet the required aspect. See set_adjustable for further details.
"""
ax = to_axis(ax)
if aspect is not None and adjustable is None:
ax.axis(aspect)
else:
ax.set_aspect(aspect, adjustable)
return ax
@contextmanager
def plot_gridSpec(nrow, ncol, wspace=None, hspace=None):
"""
Example
-------
grid = plt.GridSpec(2, 3, wspace=0.4, hspace=0.3)
plt.subplot(grid[0, 0])
plt.subplot(grid[0, 1:])
plt.subplot(grid[1, :2])
plt.subplot(grid[1, 2])
"""
from matplotlib import pyplot as plt
grid = plt.GridSpec(nrows=nrow, ncols=ncol,
wspace=wspace, hspace=hspace)
yield grid
def plot_gridSubplot(shape, loc, colspan=1, rowspan=1):
"""
Example
-------
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
"""
from matplotlib import pyplot as plt
return plt.subplot2grid(shape=shape, loc=loc, colspan=colspan, rowspan=rowspan)
def plot_subplot(*args):
from matplotlib import pyplot as plt
return plt.subplot(*args)
def set_labels(ax, title=None, xlabel=None, ylabel=None):
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
def plot_vline(x, ymin=0., ymax=1., color='r', ax=None):
from matplotlib import pyplot as plt
ax = ax if ax is not None else plt.gca()
ax.axvline(x=x, ymin=ymin, ymax=ymax, color=color, linewidth=1, alpha=0.6)
return ax
def plot_comparison_track(Xs, legends, tick_labels,
line_colors=None, line_styles=None, linewidth=1.,
marker_size=33, marker_styles=None,
fontsize=10, draw_label=True, title=None):
""" Plot multiple series for comparison
Parameters
----------
Xs : list (tuple) of series
the list that contain list of data points
legends : list of string
name for each series
tick_labels : list of string
name for each data points
draw_label : bool
    if True, draw the actual value of each point as text on top of it
"""
if len(Xs) != len(legends):
raise ValueError("Number of series (len(Xs)) is: %d different from "
"number of legends: %d" % (len(Xs), len(legends)))
nb_series = len(Xs)
if len(Xs[0]) != len(tick_labels):
raise ValueError("Number of points for each series is: %d different from "
"number of xticks' labels: %d" % (len(Xs[0], len(tick_labels))))
nb_points = len(Xs[0])
from matplotlib import pyplot as plt
# ====== some default styles ====== #
default_marker_styles = ['o', '^', 's', '*', '+', 'X', '|', 'D', 'H', '8']
if marker_styles is None and nb_series <= len(default_marker_styles):
marker_styles = default_marker_styles[:nb_series]
# ====== init ====== #
point_colors = []
inited = False
handlers = []
# ====== plotting ====== #
for idx, X in enumerate(Xs):
kwargs = {}
if line_colors is not None:
kwargs['color'] = line_colors[idx]
if line_styles is not None:
kwargs['linestyle'] = line_styles[idx]
else:
kwargs['linestyle'] = '--'
# lines
handlers.append(
plt.plot(X, linewidth=linewidth, **kwargs)[0])
# points
ax = plt.gca()
for i, j in enumerate(X):
style = 'o' if marker_styles is None else marker_styles[idx]
if not inited:
p = plt.scatter(i, j, s=marker_size, marker=style)
point_colors.append(p.get_facecolor()[0])
else:
p = plt.scatter(i, j, s=marker_size, marker=style, color=point_colors[i])
if draw_label:
ax.text(i, 1.01 * j, s=str(j), ha='center', va='bottom',
fontsize=fontsize)
inited = True
# ====== legends and tick labels ====== #
plt.gca().set_xticks(np.arange(len(tick_labels)))
plt.gca().set_xticklabels(tick_labels, rotation=-60, fontsize=fontsize)
plt.legend(handlers, legends,
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
fontsize=fontsize)
if title is not None:
plt.suptitle(title)
def plot_histogram(x, bins=80, ax=None,
normalize=False, range_0_1=False, kde=False, covariance_factor=None,
color='blue', color_kde='red', alpha=0.6, centerlize=False,
linewidth=1.2, fontsize=12, title=None):
"""
x: histogram
covariance_factor : None or float
    if a float is given, a smaller value means more detail
"""
# ====== prepare ====== #
# only 1-D
if isinstance(x, (tuple, list)):
x = np.array(x)
x = x.ravel()
ax = to_axis(ax, is_3D=False)
# ====== get the bins ====== #
if range_0_1:
x = (x - np.min(x, axis=0, keepdims=True)) /\
(np.max(x, axis=0, keepdims=True) - np.min(x, axis=0, keepdims=True))
hist, hist_bins = np.histogram(x, bins=bins, density=normalize)
width = (hist_bins[1] - hist_bins[0]) / 1.36
ax.bar((hist_bins[:-1] + hist_bins[1:]) / 2 - width / 2, hist,
width=width, color=color, alpha=alpha)
# ====== centerlize the data ====== #
min_val = np.min(hist_bins)
max_val = np.max(hist_bins)
if centerlize:
ax.set_xlim((min_val - np.abs(max_val) / 2,
max_val + np.abs(max_val) / 2))
# ====== kde ====== #
if kde:
if not normalize:
raise ValueError("KDE plot only applicable for normalized-to-1 histogram.")
density = stats.gaussian_kde(x)
if isinstance(covariance_factor, Number):
density.covariance_factor = lambda: float(covariance_factor)
density._compute_covariance()
if centerlize:
xx = np.linspace(np.min(x) - np.abs(max_val) / 2,
np.max(x) + np.abs(max_val) / 2, 100)
else:
xx = np.linspace(np.min(x), np.max(x), 100)
yy = density(xx)
ax.plot(xx, yy,
color=color_kde, alpha=min(1., alpha + 0.2),
linewidth=linewidth, linestyle='-.')
# ====== post processing ====== #
ax.tick_params(axis='both', labelsize=fontsize)
if title is not None:
ax.set_title(str(title), fontsize=fontsize)
return hist, hist_bins
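# Usage sketch (illustration only, nothing here runs on import): a normalized
# histogram with a KDE overlay for a synthetic 1-D sample.
def _example_plot_histogram():
  from matplotlib import pyplot as plt
  x = np.random.randn(1000)
  plot_histogram(x, bins=40, normalize=True, kde=True,
                 title='Standard normal sample')
  plt.show()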
def plot_histogram_layers(Xs, bins=50, ax=None,
normalize=False, range_0_1=False, kde=False, covariance_factor=None,
layer_name=None, layer_color=None,
legend_loc='upper center', legend_ncol=5, legend_colspace=0.4,
grid=True, fontsize=12, title=None):
"""
  normalize : bool (default: False)
    if True, normalize each histogram to a probability density
    (i.e. `density=True` in `numpy.histogram`)
  range_0_1 : bool (default: False)
    if True, normalize each array in `Xs` so its min value is 0,
    and max value is 1
  covariance_factor : None or float
    if a float is given, a smaller value means more detail
"""
if isinstance(Xs, np.ndarray):
assert Xs.ndim == 2
Xs = [Xs[:, i] for i in range(Xs.shape[1])]
num_classes = len(Xs)
ax = to_axis(ax, is_3D=True)
# ====== validate input argument ====== #
layer_name = _check_arg_length(layer_name, n=num_classes,
dtype=string_types, default='',
converter=lambda x:str(x))
layer_color = _check_arg_length(layer_color, n=num_classes,
dtype=string_types, default='blue',
converter=lambda x:str(x))
legends = []
for name, a, c, z, x in zip(layer_name,
np.linspace(0.6, 0.9, num_classes)[::-1],
layer_color,
np.linspace(0, 100, num_classes),
Xs):
if range_0_1:
x = (x - np.min(x, axis=0, keepdims=True)) /\
(np.max(x, axis=0, keepdims=True) - np.min(x, axis=0, keepdims=True))
hist, hist_bins = np.histogram(x, bins=bins, density=normalize)
width = (hist_bins[1] - hist_bins[0]) / 1.36
_ = ax.bar(left=(hist_bins[:-1] + hist_bins[1:]) / 2 - width / 2,
height=hist, width=width,
zs=z, zdir='y', color=c, ec=c, alpha=a)
if kde:
if not normalize:
raise ValueError("KDE plot only applicable for normalized-to-1 histogram.")
density = stats.gaussian_kde(x)
if isinstance(covariance_factor, Number):
density.covariance_factor = lambda: float(covariance_factor)
density._compute_covariance()
xx = np.linspace(np.min(x), np.max(x), 1000)
yy = density(xx)
zz = np.full_like(xx, fill_value=z)
ax.plot(xs=xx, ys=zz, zs=yy,
color=c, alpha=a, linewidth=1.2, linestyle='-.')
# legend
if len(name) > 0:
legends.append((name, _))
# ====== legend ====== #
if len(legends) > 0:
legends = ax.legend([i[1] for i in legends], [i[0] for i in legends],
markerscale=1.5, scatterpoints=1, scatteryoffsets=[0.375, 0.5, 0.3125],
loc=legend_loc, bbox_to_anchor=(0.5, -0.01), ncol=int(legend_ncol),
columnspacing=float(legend_colspace), labelspacing=0.,
fontsize=fontsize, handletextpad=0.1)
for i, c in enumerate(layer_color):
legends.legendHandles[i].set_color(c)
# ====== config ====== #
ax.set_xlabel('Value')
ax.set_zlabel('Frequency', rotation=-90)
ax.set_yticklabels([])
ax.grid(grid)
if title is not None:
ax.set_title(str(title))
return ax
# ===========================================================================
# Scatter plot
# ===========================================================================
def _parse_scatterXYZ(x, y, z):
assert x is not None, "`x` cannot be None"
# remove all `1` dimensions
x = np.squeeze(x)
if y is not None:
y = np.squeeze(y)
assert y.ndim == 1
if z is not None:
    z = np.squeeze(z)
assert z.ndim == 1
# infer y, z from x
if x.ndim > 2:
x = np.reshape(x, (-1, np.prod(x.shape[1:])))
if x.ndim == 1:
if y is None:
y = x
x = np.arange(len(y))
elif x.ndim == 2:
if x.shape[1] == 2:
y = x[:, 1]
x = x[:, 0]
elif x.shape[1] > 2:
z = x[:, 2]
y = x[:, 1]
x = x[:, 0]
return x, y, z
def _validate_color_marker_size_legend(n_samples,
color, marker, size,
is_colormap=False):
""" Return: colors, markers, sizes, legends """
from matplotlib.colors import LinearSegmentedColormap
default_color = 'b'
if isinstance(color, (string_types, LinearSegmentedColormap)):
default_color = color
color = None
default_marker = '.'
if isinstance(marker, string_types):
default_marker = marker
marker = None
default_size = 8
legend = [[None] * n_samples, # color
[None] * n_samples, # marker
[None] * n_samples] # size
seed = 1234
create_label_map = lambda labs, def_val, fn_gen: \
({labs[0]: def_val}
if len(labs) == 1 else
{i: j for i, j in zip(labs, fn_gen(len(labs), seed=seed))})
# ====== check arguments ====== #
if color is None:
color = [0] * n_samples
else:
legend[0] = color
if marker is None:
marker = [0] * n_samples
else:
legend[1] = marker
if isinstance(size, Number):
default_size = size
size = [0] * n_samples
elif size is None:
size = [0] * n_samples
else:
legend[2] = size
# ====== validate the length ====== #
assert len(color) == n_samples, \
"Given %d variable for `color`, but require %d samples" % (len(color), n_samples)
assert len(marker) == n_samples, \
"Given %d variable for `marker`, but require %d samples" % (len(marker), n_samples)
assert len(size) == n_samples, \
"Given %d variable for `size`, but require %d samples" % (len(size), n_samples)
# ====== labels set ====== #
color_labels = np.unique(color)
color_map = create_label_map(color_labels, default_color,
generate_random_colormaps
if is_colormap else
generate_random_colors)
marker_labels = np.unique(marker)
marker_map = create_label_map(marker_labels, default_marker, generate_random_marker)
size_labels = np.unique(size)
  size_map = create_label_map(size_labels, default_size, lambda n, seed: [default_size] * n)
# ====== prepare legend ====== #
legend_name = []
legend_style = []
for c, m, s in zip(*legend):
name = []
style = []
if c is None:
name.append(''); style.append(color_map[0])
else:
name.append(str(c)); style.append(color_map[c])
if m is None:
name.append(''); style.append(marker_map[0])
else:
name.append(str(m)); style.append(marker_map[m])
if s is None:
name.append(''); style.append(size_map[0])
else:
name.append(str(s)); style.append(size_map[s])
name = tuple(name)
style = tuple(style)
if name not in legend_name:
legend_name.append(name)
legend_style.append(style)
legend = OrderedDict([(i, j)
for i, j in zip(legend_style, legend_name)])
# ====== return ====== #
return ([color_map[i] for i in color],
[marker_map[i] for i in marker],
[size_map[i] for i in size],
legend)
def _downsample_scatter_points(x, y, z, n_samples, *args):
args = list(args)
# downsample all data
if n_samples is not None and n_samples < len(x):
n_samples = int(n_samples)
rand = np.random.RandomState(seed=1234)
ids = rand.permutation(len(x))[:n_samples]
x = np.array(x)[ids]
y = np.array(y)[ids]
if z is not None:
z = np.array(z)[ids]
args = [np.array(a)[ids]
if isinstance(a, (tuple, list, np.ndarray))
else a
for a in args]
return [len(x), x, y, z] + args
def plot_scatter_layers(x_y_val, ax=None,
layer_name=None, layer_color=None, layer_marker=None,
size=4.0, z_ratio=4, elev=None, azim=88,
ticks_off=True, grid=True, surface=True,
wireframe=False, wireframe_resolution=10,
colorbar=False, colorbar_horizontal=False,
legend_loc='upper center', legend_ncol=3, legend_colspace=0.4,
fontsize=8, title=None):
"""
Parameter
---------
z_ratio: float (default: 4)
    the amount of compression along the z-axis, i.e. how much closer
    the layers are brought to each other compared to the (x, y) axes
"""
from matplotlib import pyplot as plt
assert len(x_y_val) > 1, "Use `plot_scatter_heatmap` to plot only 1 layer"
max_z = -np.inf
min_z = np.inf
for x, y, val in x_y_val:
assert len(x) == len(y) == len(val)
max_z = max(max_z, np.max(x), np.max(y))
min_z = min(min_z, np.min(x), np.min(y))
ax = to_axis(ax, is_3D=True)
num_classes = len(x_y_val)
# ====== preparing ====== #
# name
layer_name = _check_arg_length(dat=layer_name, n=num_classes,
dtype=string_types, default='',
converter=lambda x: str(x))
# colormap
layer_color = _check_arg_length(dat=layer_color, n=num_classes,
dtype=string_types, default='Blues',
converter=lambda x: plt.get_cmap(str(x)))
# class marker
layer_marker = _check_arg_length(dat=layer_marker, n=num_classes,
dtype=string_types, default='o',
converter=lambda x: str(x))
# size
size = _check_arg_length(dat=size, n=num_classes,
dtype=Number, default=4.0,
converter=lambda x: float(x))
# ====== plotting each class ====== #
legends = []
for idx, (alpha, z) in enumerate(zip(np.linspace(0.05, 0.4, num_classes),
np.linspace(min_z / 4, max_z / 4, num_classes))):
x, y, val = x_y_val[idx]
num_samples = len(x)
z = np.full(shape=(num_samples,), fill_value=z)
_ = ax.scatter(x, y, z, c=val, s=size[idx], marker=layer_marker[idx],
cmap=layer_color[idx])
# ploting surface and wireframe
if surface or wireframe:
x, y = np.meshgrid(np.linspace(min(x), max(x), wireframe_resolution),
np.linspace(min(y), max(y), wireframe_resolution))
z = np.full_like(x, fill_value=z[0])
if surface:
ax.plot_surface(X=x, Y=y, Z=z,
color=layer_color[idx](0.5), edgecolor='none',
alpha=alpha)
if wireframe:
ax.plot_wireframe(X=x, Y=y, Z=z, linewidth=0.8,
color=layer_color[idx](0.8), alpha=alpha + 0.1)
# legend
name = layer_name[idx]
if len(name) > 0:
legends.append((name, _))
# colorbar
if colorbar:
cba = plt.colorbar(_, shrink=0.5, pad=0.01,
orientation='horizontal' if colorbar_horizontal else 'vertical')
if len(name) > 0:
cba.set_label(name, fontsize=fontsize)
# ====== plot the legend ====== #
if len(legends) > 0:
legends = ax.legend([i[1] for i in legends], [i[0] for i in legends],
markerscale=1.5, scatterpoints=1, scatteryoffsets=[0.375, 0.5, 0.3125],
loc=legend_loc, bbox_to_anchor=(0.5, -0.01), ncol=int(legend_ncol),
columnspacing=float(legend_colspace), labelspacing=0.,
fontsize=fontsize, handletextpad=0.1)
for i, c in enumerate(layer_color):
legends.legendHandles[i].set_color(c(.8))
# ====== some configuration ====== #
if ticks_off:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.grid(grid)
if title is not None:
ax.set_title(str(title))
if (elev is not None or azim is not None):
ax.view_init(elev=ax.elev if elev is None else elev,
azim=ax.azim if azim is None else azim)
return ax
def plot_scatter_heatmap(x, val, y=None, z=None, ax=None,
colormap='bwr', marker='o', size=4.0, alpha=0.8,
elev=None, azim=None,
ticks_off=True, grid=True,
colorbar=False, colorbar_horizontal=False, colorbar_ticks=None,
legend_enable=True,
legend_loc='upper center', legend_ncol=3, legend_colspace=0.4,
n_samples=None, fontsize=8, title=None):
"""
Parameters
----------
x : {1D, or 2D array} [n_samples,]
y : {None, 1D-array} [n_samples,]
z : {None, 1D-array} [n_samples,]
if provided, plot in 3D
val : 1D-array (num_samples,)
float value for the intensity of given class
"""
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
x, y, z = _parse_scatterXYZ(x, y, z)
assert len(x) == len(y) == len(val)
if z is not None:
assert len(y) == len(z)
is_3D_mode = False if z is None else True
ax = to_axis(ax, is_3D=is_3D_mode)
min_val = np.min(val)
max_val = np.max(val)
assert isinstance(colormap, (string_types, LinearSegmentedColormap)), \
"`colormap` can be string or instance of matplotlib Colormap, but given: %s" % type(colormap)
# ====== downsampling points ====== #
n_samples, x, y, z, val, marker, size = \
_downsample_scatter_points(x, y, z, n_samples, val, marker, size)
colormap, marker, size, legend = _validate_color_marker_size_legend(
n_samples, colormap, marker, size)
# ====== plotting each class ====== #
axes = []
legend_name = []
for idx, (style, name) in enumerate(legend.items()):
x_, y_, z_, val_ = [], [], [], []
# get the right set of data points
for i, (c, m, s) in enumerate(zip(colormap, marker, size)):
if c == style[0] and m == style[1] and s == style[2]:
x_.append(x[i])
y_.append(y[i])
val_.append(val[i])
if is_3D_mode:
z_.append(z[i])
# plot
kwargs = {'c':val_, 'vmin': min_val, 'vmax': max_val,
'cmap': style[0], 'marker':style[1], 's':style[2],
'alpha': alpha}
if is_3D_mode:
_ = ax.scatter(x_, y_, z_, **kwargs)
else:
_ = ax.scatter(x_, y_, **kwargs)
axes.append(_)
# make the shortest name
name = [i for i in name if len(i) > 0]
short_name = []
for i in name:
if i not in short_name:
short_name.append(i)
name = ', '.join(short_name)
if len(name) > 0:
legend_name.append(name)
# colorbar
if colorbar and idx == 0:
cba = plt.colorbar(_, shrink=0.99, pad=0.01,
orientation='horizontal' if colorbar_horizontal else 'vertical')
if colorbar_ticks is not None:
cba.set_ticks(np.linspace(min_val, max_val,
num=len(colorbar_ticks)))
cba.set_ticklabels(colorbar_ticks)
else:
cba.set_ticks(np.linspace(min_val, max_val, num=8 - 1))
cba.ax.tick_params(labelsize=fontsize)
# if len(name) > 0:
# cba.set_label(name, fontsize=fontsize)
# ====== plot the legend ====== #
if len(legend_name) > 0 and bool(legend_enable):
legend = ax.legend(axes, legend_name, markerscale=1.5,
scatterpoints=1, scatteryoffsets=[0.375, 0.5, 0.3125],
loc=legend_loc, bbox_to_anchor=(0.5, -0.01), ncol=int(legend_ncol),
columnspacing=float(legend_colspace), labelspacing=0.,
fontsize=fontsize, handletextpad=0.1)
# if len(legend_name) > 0:
# legends = ax.legend([i[1] for i in legends], [i[0] for i in legends],
# markerscale=1.5, scatterpoints=1, scatteryoffsets=[0.375, 0.5, 0.3125],
# loc=legend_loc, bbox_to_anchor=(0.5, -0.01), ncol=int(legend_ncol),
# columnspacing=float(legend_colspace), labelspacing=0.,
# fontsize=fontsize, handletextpad=0.1)
# for i, c in enumerate(cls_color):
# legends.legendHandles[i].set_color(c(.8))
# ====== some configuration ====== #
if ticks_off:
ax.set_xticklabels([])
ax.set_yticklabels([])
if is_3D_mode:
ax.set_zticklabels([])
ax.grid(grid)
if title is not None:
ax.set_title(str(title), fontsize=fontsize, fontweight='regular')
if is_3D_mode and (elev is not None or azim is not None):
ax.view_init(elev=ax.elev if elev is None else elev,
azim=ax.azim if azim is None else azim)
return ax
def plot_scatter(x, y=None, z=None,
color='b', marker='.', size=4.0, alpha=1,
linewidths=None, linestyle='-',
facecolors=None, edgecolors=None,
elev=None, azim=None,
ticks_off=True, grid=True,
legend_enable=True,
legend_loc='upper center', legend_ncol=3, legend_colspace=0.4,
n_samples=None, fontsize=8,
ax=None, title=None):
  ''' Generalized scatter plot in 2D or 3D with per-point color, marker and size.
Parameters
----------
x : {1D, or 2D array} [n_samples,]
y : {None, 1D-array} [n_samples,]
z : {None, 1D-array} [n_samples,]
if provided, plot in 3D
  ax : {None, int, tuple of int, Axes object} (default: None)
if int, `ax` is the location of the subplot (e.g. `111`)
if tuple, `ax` is tuple of location (e.g. `(1, 1, 1)`)
if Axes object, `ax` must be `mpl_toolkits.mplot3d.Axes3D` in case `z`
is given
color: array [n_samples,]
list of colors for each class, check `generate_random_colors`,
length of color must be equal to `x` and `y`
marker: array [n_samples,]
different marker for each color, default marker is '.'
legend_ncol : int (default: 3)
number of columns for displaying legends
legend_colspace : float (default: 0.4)
space between columns in the legend
legend_loc : {str, int}
‘best’ 0
‘upper right’ 1
‘upper left’ 2
‘lower left’ 3
‘lower right’ 4
‘right’ 5
‘center left’ 6
‘center right’ 7
‘lower center’ 8
‘upper center’ 9
‘center’ 10
elev : {None, Number} (default: None or 30 degree)
stores the elevation angle in the z plane, with `elev=90` is
looking from top down.
This can be used to rotate the axes programatically.
azim : {None, Number} (default: None or -60 degree)
stores the azimuth angle in the x,y plane.
This can be used to rotate the axes programatically.
title : {None, string} (default: None)
specific title for the subplot
'''
x, y, z = _parse_scatterXYZ(x, y, z)
assert len(x) == len(y)
if z is not None:
assert len(y) == len(z)
is_3D_mode = False if z is None else True
ax = to_axis(ax, is_3D_mode)
# ====== perform downsample ====== #
n_samples, x, y, z, color, marker, size = _downsample_scatter_points(
x, y, z, n_samples, color, marker, size)
color, marker, size, legend = _validate_color_marker_size_legend(
n_samples, color, marker, size)
# ====== plotting ====== #
# group into color-marker then plot each set
axes = []
legend_name = []
for style, name in legend.items():
x_, y_, z_ = [], [], []
# get the right set of data points
for i, (c, m, s) in enumerate(zip(color, marker, size)):
if c == style[0] and m == style[1] and s == style[2]:
x_.append(x[i])
y_.append(y[i])
if is_3D_mode:
z_.append(z[i])
# plotting
if is_3D_mode:
_ = ax.scatter(x_, y_, z_,
color=style[0], marker=style[1], s=style[2],
alpha=alpha, linewidths=linewidths,
edgecolors=edgecolors, facecolors=facecolors,
linestyle=linestyle)
else:
_ = ax.scatter(x_, y_,
color=style[0], marker=style[1], s=style[2],
alpha=alpha, linewidths=linewidths,
edgecolors=edgecolors, facecolors=facecolors,
linestyle=linestyle)
axes.append(_)
# make the shortest name
name = [i for i in name if len(i) > 0]
short_name = []
for i in name:
if i not in short_name:
short_name.append(i)
name = short_name
if len(name) > 0:
legend_name.append(', '.join(name))
# ====== plot the legend ====== #
if len(legend_name) > 0 and bool(legend_enable):
legend = ax.legend(axes, legend_name, markerscale=1.5,
scatterpoints=1, scatteryoffsets=[0.375, 0.5, 0.3125],
loc=legend_loc, bbox_to_anchor=(0.5, -0.01), ncol=int(legend_ncol),
columnspacing=float(legend_colspace), labelspacing=0.,
fontsize=fontsize, handletextpad=0.1)
# ====== some configuration ====== #
if ticks_off:
ax.set_xticklabels([])
ax.set_yticklabels([])
if is_3D_mode:
ax.set_zticklabels([])
ax.grid(grid)
if title is not None:
ax.set_title(str(title), fontsize=fontsize, fontweight='regular')
if is_3D_mode and (elev is not None or azim is not None):
ax.view_init(elev=ax.elev if elev is None else elev,
azim=ax.azim if azim is None else azim)
return ax
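# Usage sketch (illustration only): scatter two Gaussian clusters, using the
# per-point `color` labels so the legend is generated automatically.
def _example_plot_scatter():
  from matplotlib import pyplot as plt
  x = np.concatenate([np.random.randn(100, 2),
                      np.random.randn(100, 2) + 4.], axis=0)
  labels = ['cluster_A'] * 100 + ['cluster_B'] * 100
  plot_scatter(x, color=labels, marker='.', size=12,
               title='Two clusters')
  plt.show()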
def plot_text_scatter(X, text, ax=None,
font_weight='bold', font_size=8, font_alpha=0.8,
elev=None, azim=None, title=None):
"""
Parameters
----------
X : numpy.ndarray
2-D array
text : {tuple, list, array}
list of the text or character for plotting at each data point
  ax : {None, int, tuple of int, Axes object} (default: None)
if int, `ax` is the location of the subplot (e.g. `111`)
if tuple, `ax` is tuple of location (e.g. `(1, 1, 1)`)
if Axes object, `ax` must be `mpl_toolkits.mplot3d.Axes3D` in case `z`
is given
elev : {None, Number} (default: None or 30 degree)
stores the elevation angle in the z plane, with `elev=90` is
looking from top down.
This can be used to rotate the axes programatically.
azim : {None, Number} (default: None or -60 degree)
stores the azimuth angle in the x,y plane.
This can be used to rotate the axes programatically.
"""
assert X.ndim == 2, \
"Only support `X` two dimension array, but given: %s" % str(X.shape)
if X.shape[1] == 2:
is_3D = False
elif X.shape[1] == 3:
is_3D = True
else:
raise ValueError("No support for `X` with shape: %s" % str(X.shape))
ax = to_axis(ax, is_3D=is_3D)
assert len(text) == len(X), \
"`text` length: %d is different from `X` length: %d" % (len(text), len(X))
from matplotlib import pyplot as plt
# ====== normalize X ====== #
x_min, x_max = np.min(X, axis=0), np.max(X, axis=0)
X = (X - x_min) / (x_max - x_min)
# ====== check y ====== #
text = [str(i) for i in text]
labels = sorted(set(text))
# ====== start plotting ====== #
font_dict = {'weight': font_weight,
'size': font_size,
'alpha':font_alpha}
for x, t in zip(X, text):
if is_3D:
plt.gca().text(x[0], x[1], x[2], t,
color=plt.cm.tab20((labels.index(t) + 1) / float(len(labels))),
fontdict=font_dict)
else:
plt.text(x[0], x[1], t,
color=plt.cm.tab20((labels.index(t) + 1) / float(len(labels))),
fontdict=font_dict)
# ====== minor adjustment ====== #
ax.set_xticklabels([])
ax.set_yticklabels([])
if is_3D:
ax.set_zticklabels([])
if title is not None:
ax.set_title(title, fontsize=font_size + 2, weight='semibold')
return ax
def plot(x, y=None, ax=None, color='b', lw=1, **kwargs):
  '''Simple line plot, a thin wrapper around matplotlib's Axes.plot.
'''
from matplotlib import pyplot as plt
ax = ax if ax is not None else plt.gca()
if y is None:
ax.plot(x, c=color, lw=lw, **kwargs)
else:
ax.plot(x, y, c=color, lw=lw, **kwargs)
return ax
def plot_ellipses(mean, sigma, color, alpha=0.75, ax=None):
""" Plot an ellipse in 2-D
If the data is more than 2-D, you can use PCA before
fitting the GMM.
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
# ====== prepare ====== #
mean = mean.ravel()
assert len(mean) == 2, "mean must be vector of size 2"
assert sigma.shape == (2, 2), "sigma must be matrix of shape (2, 2)"
if ax is None:
ax = plt.gca()
covariances = sigma ** 2
# ====== create the ellipses ====== #
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle,
color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
def plot_indices(idx, x=None, ax=None, alpha=0.3, ymin=0., ymax=1.):
from matplotlib import pyplot as plt
ax = ax if ax is not None else plt.gca()
x = range(idx.shape[0]) if x is None else x
for i, j in zip(idx, x):
if i: ax.axvline(x=j, ymin=ymin, ymax=ymax,
color='r', linewidth=1, alpha=alpha)
return ax
def plot_multiple_features(features, order=None, title=None, fig_width=4,
sharex=False):
""" Plot a series of 1D and 2D in the same scale for comparison
Parameters
----------
features: Mapping
    a Python Mapping from name (string type) to feature matrix (`numpy.ndarray`)
order: None or list of string
if None, the order is keys of `features` sorted in alphabet order,
else, plot the features or subset of features based on the name
specified in `order`
title: None or string
title for the figure
Note
----
delta or delta delta features should have suffix: '_d1' and '_d2'
"""
known_order = [
# For audio processing
'raw',
'stft_energy', 'stft_energy_d1', 'stft_energy_d2',
'frames_energy', 'frames_energy_d1', 'frames_energy_d2',
'energy', 'energy_d1', 'energy_d2',
'vad',
'sad',
'sap', 'sap_d1', 'sap_d2',
'pitch', 'pitch_d1', 'pitch_d2',
'loudness', 'loudness_d1', 'loudness_d2',
'f0', 'f0_d1', 'f0_d2',
'spec', 'spec_d1', 'spec_d2',
'mspec', 'mspec_d1', 'mspec_d2',
'mfcc', 'mfcc_d1', 'mfcc_d2',
'sdc',
'qspec', 'qspec_d1', 'qspec_d2',
'qmspec', 'qmspec_d1', 'qmspec_d2',
'qmfcc', 'qmfcc_d1', 'qmfcc_d2',
'bnf', 'bnf_d1', 'bnf_d2',
'ivec', 'ivec_d1', 'ivec_d2',
# For image processing
# For video processing
]
from matplotlib import pyplot as plt
if isinstance(features, (tuple, list)):
features = OrderedDict(features)
if not isinstance(features, Mapping):
raise ValueError("`features` must be mapping from name -> feature_matrix.")
# ====== check order or create default order ====== #
if order is not None:
order = [str(o) for o in order]
else:
if isinstance(features, OrderedDict):
order = features.keys()
else:
keys = sorted(features.keys() if isinstance(features, Mapping) else
[k for k, v in features])
order = []
for name in known_order:
if name in keys:
order.append(name)
# add the remain keys
for name in keys:
if name not in order:
order.append(name)
# ====== get all numpy array ====== #
features = [(name, features[name])
for name in order
if name in features and
isinstance(features[name], np.ndarray) and
features[name].ndim <= 4]
plt.figure(figsize=(int(fig_width), len(features)))
for i, (name, X) in enumerate(features):
X = X.astype('float32')
plt.subplot(len(features), 1, i + 1)
# flatten 2D features with one dim equal to 1
if X.ndim == 2 and any(s == 1 for s in X.shape):
X = X.ravel()
# check valid dimension and select appropriate plot
if X.ndim == 1:
plt.plot(X)
plt.xlim(0, len(X))
plt.ylabel(name, fontsize=6)
elif X.ndim == 2: # transpose to frequency x time
plot_spectrogram(X.T, title=name)
elif X.ndim == 3:
plt.imshow(X)
plt.xticks(())
plt.yticks(())
plt.ylabel(name, fontsize=6)
else:
raise RuntimeError("No support for >= 3D features.")
# auto, equal
plt.gca().set_aspect(aspect='auto')
# plt.axis('off')
plt.xticks(())
# plt.yticks(())
plt.tick_params(axis='y', size=6, labelsize=4, color='r', pad=0,
length=2)
# add title to the first subplot
if i == 0 and title is not None:
plt.title(str(title), fontsize=8)
if sharex:
plt.subplots_adjust(hspace=0)
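# Usage sketch (illustration only): compare a 1-D energy contour and a 2-D
# spectrogram-like matrix in the same figure.
def _example_plot_multiple_features():
  from matplotlib import pyplot as plt
  feats = {'energy': np.random.rand(200),
           'spec': np.random.rand(200, 80)} # time x frequency
  plot_multiple_features(feats, title='random features')
  plt.show()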
def plot_spectrogram(x, vad=None, ax=None, colorbar=False,
linewidth=0.5, vmin='auto', vmax='auto',
title=None):
''' Plotting spectrogram
Parameters
----------
x : np.ndarray
2D array
vad : np.ndarray, list
1D array, a red line will be draw at vad=1.
ax : matplotlib.Axis
create by fig.add_subplot, or plt.subplots
colorbar : bool, 'all'
    whether to add a colorbar to the plot; if colorbar='all', calling this
    method after you add all subplots will create one big colorbar
    for all your plots
path : str
if path is specified, save png image to given path
Notes
-----
  Make sure nrow and ncol passed to add_subplot are int, or this error will show up
- ValueError: The truth value of an array with more than one element is
ambiguous. Use a.any() or a.all()
'''
from matplotlib import pyplot as plt
if vmin == 'auto':
vmin = np.min(x)
if vmax == 'auto':
vmax = np.max(x)
# colormap = _cmap(x)
# colormap = 'spectral'
colormap = 'nipy_spectral'
if x.ndim > 2:
raise ValueError('No support for > 2D')
elif x.ndim == 1:
x = x[:, None]
if vad is not None:
vad = np.asarray(vad).ravel()
if len(vad) != x.shape[1]:
raise ValueError('Length of VAD must equal to signal length, but '
'length[vad]={} != length[signal]={}'.format(
len(vad), x.shape[1]))
# normalize vad
vad = np.cast[np.bool](vad)
ax = to_axis(ax, is_3D=False)
ax.set_aspect('equal', 'box')
# ax.tick_params(axis='both', which='major', labelsize=6)
ax.set_xticks([])
ax.set_yticks([])
# ax.axis('off')
if title is not None:
ax.set_ylabel(str(title) + '-' + str(x.shape), fontsize=6)
img = ax.imshow(x, cmap=colormap, interpolation='kaiser', alpha=0.9,
vmin=vmin, vmax=vmax, origin='lower')
# img = ax.pcolorfast(x, cmap=colormap, alpha=0.9)
# ====== draw vad vertical line ====== #
if vad is not None:
for i, j in enumerate(vad):
if j: ax.axvline(x=i, ymin=0, ymax=1, color='r', linewidth=linewidth,
alpha=0.3)
# plt.grid(True)
if colorbar == 'all':
fig = ax.get_figure()
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
elif colorbar:
plt.colorbar(img, ax=ax)
return ax
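# Usage sketch (illustration only): render a random matrix as a spectrogram;
# the function expects a 2-D array laid out as frequency x time.
def _example_plot_spectrogram():
  from matplotlib import pyplot as plt
  S = np.abs(np.random.randn(80, 200)) # 80 frequency bins, 200 frames
  plot_spectrogram(S, colorbar=True, title='random')
  plt.show()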
def plot_images(X, tile_shape=None, tile_spacing=None,
fig=None, title=None):
'''
Parameters
----------
x : 2D-gray or 3D-color images, or list of (2D, 3D images)
for color image the color channel is second dimension
tile_shape : tuple
resized shape of images
tile_spacing : tuple
    space between rows and columns of images
'''
from matplotlib import pyplot as plt
if not isinstance(X, (tuple, list)):
X = [X]
if not isinstance(title, (tuple, list)):
title = [title]
n = int(np.ceil(np.sqrt(len(X))))
for i, (x, t) in enumerate(zip(X, title)):
if x.ndim == 3 or x.ndim == 2:
cmap = plt.cm.Greys_r
elif x.ndim == 4:
cmap = None
else:
raise ValueError('NO support for %d dimensions image!' % x.ndim)
x = tile_raster_images(x, tile_shape, tile_spacing)
if fig is None:
fig = plt.figure()
subplot = fig.add_subplot(n, n, i + 1)
subplot.imshow(x, cmap=cmap)
if t is not None:
subplot.set_title(str(t), fontsize=12, fontweight='bold')
subplot.axis('off')
fig.tight_layout()
return fig
def plot_images_old(x, fig=None, titles=None, show=False):
'''
x : 2D-gray or 3D-color images
for color image the color channel is second dimension
'''
from matplotlib import pyplot as plt
if x.ndim == 3 or x.ndim == 2:
cmap = plt.cm.Greys_r
elif x.ndim == 4:
cmap = None
shape = x.shape[2:] + (x.shape[1],)
x = np.vstack([i.T.reshape((-1,) + shape) for i in x])
else:
raise ValueError('NO support for %d dimensions image!' % x.ndim)
if x.ndim == 2:
ncols = 1
nrows = 1
else:
ncols = int(np.ceil(np.sqrt(x.shape[0])))
nrows = int(ncols)
if fig is None:
fig = plt.figure()
if titles is not None:
if not isinstance(titles, (tuple, list)):
titles = [titles]
if len(titles) != x.shape[0]:
      raise ValueError('Titles must have the same length as '
'the number of images!')
for i in range(ncols):
for j in range(nrows):
idx = i * ncols + j
if idx < x.shape[0]:
subplot = fig.add_subplot(nrows, ncols, idx + 1)
subplot.imshow(x[idx], cmap=cmap)
if titles is not None:
subplot.set_title(titles[idx])
subplot.axis('off')
if show:
# plt.tight_layout()
plt.show(block=True)
input('<Enter> to close the figure ...')
else:
return fig
def plot_Cnorm(cnorm, labels, Ptrue=[0.1, 0.5], ax=None, title=None,
fontsize=12):
from matplotlib import pyplot as plt
cmap = plt.cm.Blues
cnorm = cnorm.astype('float32')
if not isinstance(Ptrue, (tuple, list, np.ndarray)):
Ptrue = (Ptrue,)
Ptrue = [float(i) for i in Ptrue]
if len(Ptrue) != cnorm.shape[0]:
raise ValueError("`Cnorm` was calculated for %d Ptrue values, but given only "
"%d values for `Ptrue`: %s" %
(cnorm.shape[0], len(Ptrue), str(Ptrue)))
ax = to_axis(ax, is_3D=False)
ax.imshow(cnorm, interpolation='nearest', cmap=cmap)
# axis.get_figure().colorbar(im)
ax.set_xticks(np.arange(len(labels)))
ax.set_yticks(np.arange(len(Ptrue)))
ax.set_xticklabels(labels, rotation=-57, fontsize=fontsize)
ax.set_yticklabels([str(i) for i in Ptrue], fontsize=fontsize)
ax.set_ylabel('Ptrue', fontsize=fontsize)
ax.set_xlabel('Predicted label', fontsize=fontsize)
# center text for value of each grid
for i, j in itertools.product(range(len(Ptrue)),
range(len(labels))):
color = 'red'
weight = 'normal'
fs = fontsize
text = '%.2f' % cnorm[i, j]
plt.text(j, i, text,
weight=weight, color=color, fontsize=fs,
verticalalignment="center",
horizontalalignment="center")
# Turns off grid on the left Axis.
ax.grid(False)
title = "Cnorm: %.6f" % np.mean(cnorm) if title is None else \
"%s (Cnorm: %.6f)" % (str(title), np.mean(cnorm))
ax.set_title(title, fontsize=fontsize + 2, weight='semibold')
# axis.tight_layout()
return ax
def plot_confusion_matrix(cm, labels=None, ax=None, fontsize=12, colorbar=False,
title=None):
# TODO: new style for confusion matrix (using small and big dot)
from matplotlib import pyplot as plt
cmap = plt.cm.Blues
ax = to_axis(ax, is_3D=False)
if labels is None:
labels = ['#%d' % i for i in range(max(cm.shape))]
# calculate F1
N_row = np.sum(cm, axis=-1)
N_col = np.sum(cm, axis=0)
TP = np.diagonal(cm)
FP = N_col - TP
FN = N_row - TP
precision = TP / (TP + FP)
recall = TP / (TP + FN)
F1 = 2 / (1 / precision + 1 / recall)
F1[np.isnan(F1)] = 0.
F1_mean = np.mean(F1)
# column normalize
nb_classes = cm.shape[0]
cm = cm.astype('float32') / np.sum(cm, axis=1, keepdims=True)
# im = ax.pcolorfast(cm.T, cmap=cmap)
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
# axis.get_figure().colorbar(im)
tick_marks = np.arange(len(labels))
ax.set_xticks(tick_marks)
ax.set_yticks(tick_marks)
ax.set_xticklabels(labels, rotation=-57, fontsize=fontsize)
ax.set_yticklabels(labels, fontsize=fontsize)
ax.set_ylabel('True label', fontsize=fontsize)
ax.set_xlabel('Predicted label', fontsize=fontsize)
# center text for value of each grid
worst_index = {i: np.argmax([val if j != i else -1
for j, val in enumerate(row)])
for i, row in enumerate(cm)}
for i, j in itertools.product(range(nb_classes),
range(nb_classes)):
color = 'black'
weight = 'normal'
fs = fontsize
text = '%.2f' % cm[i, j]
if i == j: # diagonal
color = 'magenta'
# color = "darkgreen" if cm[i, j] <= 0.8 else 'forestgreen'
weight = 'bold'
fs = fontsize
text = '%.2f\nF1:%.2f' % (cm[i, j], F1[i])
elif j == worst_index[i]: # worst mis-classified
color = 'red'
weight = 'semibold'
fs = fontsize
plt.text(j, i, text,
weight=weight, color=color, fontsize=fs,
verticalalignment="center",
horizontalalignment="center")
# Turns off grid on the left Axis.
ax.grid(False)
# ====== colorbar ====== #
if colorbar == 'all':
fig = ax.get_figure()
axes = fig.get_axes()
fig.colorbar(im, ax=axes)
elif colorbar:
plt.colorbar(im, ax=ax)
# ====== set title ====== #
if title is None:
title = ''
title += ' (F1: %.3f)' % F1_mean
ax.set_title(title, fontsize=fontsize + 2, weight='semibold')
# axis.tight_layout()
return ax
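# Usage sketch (illustration only): confusion matrix of a toy 3-class problem;
# rows are true labels, columns are predictions.
def _example_plot_confusion_matrix():
  from matplotlib import pyplot as plt
  cm = np.array([[50, 3, 2],
                 [4, 45, 6],
                 [1, 7, 42]], dtype='float32')
  plot_confusion_matrix(cm, labels=['dog', 'cat', 'bird'], colorbar=True)
  plt.show()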
def plot_weights(x, ax=None, colormap = "Greys", colorbar=False, keep_aspect=True):
'''
Parameters
----------
x : np.ndarray
2D array
ax : matplotlib.Axis
create by fig.add_subplot, or plt.subplots
colormap : str
colormap alias from plt.cm.Greys = 'Greys' ('spectral')
plt.cm.gist_heat
colorbar : bool, 'all'
    whether to add a colorbar to the plot; if colorbar='all', calling this
    method after you add all subplots will create one big colorbar
    for all your plots
path : str
if path is specified, save png image to given path
Notes
-----
  Make sure nrow and ncol passed to add_subplot are int, or this error will show up
- ValueError: The truth value of an array with more than one element is
ambiguous. Use a.any() or a.all()
Example
-------
>>> x = np.random.rand(2000, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(2, 2, 1)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 2)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 3)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 4)
    >>> dnntoolkit.visual.plot_weights(x, ax)
>>> plt.show()
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
if x.ndim > 2:
raise ValueError('No support for > 2D')
elif x.ndim == 1:
x = x[:, None]
ax = ax if ax is not None else plt.gca()
if keep_aspect:
ax.set_aspect('equal', 'box')
# ax.tick_params(axis='both', which='major', labelsize=6)
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
ax.set_title(str(x.shape), fontsize=6)
img = ax.pcolorfast(x, cmap=colormap, alpha=0.8)
plt.grid(True)
if colorbar == 'all':
fig = ax.get_figure()
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
elif colorbar:
plt.colorbar(img, ax=ax)
return ax
def plot_weights3D(x, colormap = "Greys"):
'''
Example
-------
    >>> # 3D shape
    >>> x = np.random.rand(32, 28, 28)
    >>> dnntoolkit.visual.plot_weights3D(x)
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
shape = x.shape
if len(shape) == 3:
ncols = int(np.ceil(np.sqrt(shape[0])))
nrows = int(ncols)
else:
raise ValueError('This function only support 3D weights matrices')
fig = plt.figure()
count = 0
for i in range(nrows):
for j in range(ncols):
count += 1
# skip
if count > shape[0]:
continue
ax = fig.add_subplot(nrows, ncols, count)
# ax.set_aspect('equal', 'box')
ax.set_xticks([])
ax.set_yticks([])
if i == 0 and j == 0:
ax.set_xlabel('Width:%d' % x.shape[-1], fontsize=6)
ax.xaxis.set_label_position('top')
ax.set_ylabel('Height:%d' % x.shape[-2], fontsize=6)
ax.yaxis.set_label_position('left')
else:
ax.axis('off')
            # image data: no idea why pcolorfast flips the image vertically
img = ax.pcolorfast(x[count - 1][::-1, :], cmap=colormap, alpha=0.9)
# plt.grid(True)
plt.tight_layout()
# colorbar
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
return fig
def plot_weights4D(x, colormap = "Greys"):
'''
Example
-------
    >>> # 4D shape
    >>> x = np.random.rand(32, 8, 28, 28)
    >>> dnntoolkit.visual.plot_weights4D(x)
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
shape = x.shape
if len(shape) != 4:
raise ValueError('This function only support 4D weights matrices')
fig = plt.figure()
imgs = []
for i in range(shape[0]):
imgs.append(tile_raster_images(x[i], tile_spacing=(3, 3)))
ncols = int(np.ceil(np.sqrt(shape[0])))
nrows = int(ncols)
count = 0
for i in range(nrows):
for j in range(ncols):
count += 1
# skip
if count > shape[0]:
continue
ax = fig.add_subplot(nrows, ncols, count)
ax.set_aspect('equal', 'box')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
            # image data: no idea why pcolorfast flips the image vertically
img = ax.pcolorfast(imgs[count - 1][::-1, :], cmap=colormap, alpha=0.9)
plt.tight_layout()
# colorbar
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
return fig
def plot_hinton(matrix, max_weight=None, ax=None):
'''
    Hinton diagrams are useful for visualizing the values of a 2D array (e.g.
    a weight matrix): positive values are drawn as white squares, negative
    values as black squares, and the size of each square represents the
    magnitude of the value.
    * Note: performance decreases significantly once the array is larger than
      about 50x50
    Example:
        W = np.random.rand(10, 10)
        plot_hinton(W)
'''
from matplotlib import pyplot as plt
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2**np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w))
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
return ax
# ===========================================================================
# Helper methods
# ===========================================================================
def plot_show(block=True, tight_layout=False):
from matplotlib import pyplot as plt
if tight_layout:
plt.tight_layout()
plt.show(block=block)
if not block: # manually block
input('<enter> to close all plots')
plt.close('all')
# ===========================================================================
# Detection plot
# ===========================================================================
def _ppndf(cum_prob):
""" @Original code from NIST
The input to this function is a cumulative probability.
The output from this function is the Normal deviate
that corresponds to that probability.
"""
SPLIT = 0.42
A0 = 2.5066282388
A1 = -18.6150006252
A2 = 41.3911977353
A3 = -25.4410604963
B1 = -8.4735109309
B2 = 23.0833674374
B3 = -21.0622410182
B4 = 3.1308290983
C0 = -2.7871893113
C1 = -2.2979647913
C2 = 4.8501412713
C3 = 2.3212127685
D1 = 3.5438892476
D2 = 1.6370678189
# ====== preprocess ====== #
cum_prob = np.array(cum_prob)
eps = np.finfo(cum_prob.dtype).eps
cum_prob = np.clip(cum_prob, eps, 1 - eps)
adj_prob = cum_prob - 0.5
# ====== init ====== #
R = np.empty_like(cum_prob)
norm_dev = np.empty_like(cum_prob)
# ====== transform ====== #
centerindexes = np.argwhere(np.abs(adj_prob) <= SPLIT).ravel()
tailindexes = np.argwhere(np.abs(adj_prob) > SPLIT).ravel()
    # do center stuff first
R[centerindexes] = adj_prob[centerindexes] * adj_prob[centerindexes]
norm_dev[centerindexes] = adj_prob[centerindexes] * \
(((A3 * R[centerindexes] + A2) * R[centerindexes] + A1) * R[centerindexes] + A0)
norm_dev[centerindexes] = norm_dev[centerindexes] /\
((((B4 * R[centerindexes] + B3) * R[centerindexes] + B2) * R[centerindexes] + B1) * R[centerindexes] + 1.0)
#find left and right tails
right = np.argwhere(cum_prob[tailindexes] > 0.5).ravel()
left = np.argwhere(cum_prob[tailindexes] < 0.5).ravel()
# do tail stuff
R[tailindexes] = cum_prob[tailindexes]
R[tailindexes[right]] = 1 - cum_prob[tailindexes[right]]
R[tailindexes] = np.sqrt((-1.0) * np.log(R[tailindexes]))
norm_dev[tailindexes] = (((C3 * R[tailindexes] + C2) * R[tailindexes] + C1) * R[tailindexes] + C0)
norm_dev[tailindexes] = norm_dev[tailindexes] / ((D2 * R[tailindexes] + D1) * R[tailindexes] + 1.0)
# swap sign on left tail
norm_dev[tailindexes[left]] = norm_dev[tailindexes[left]] * -1.0
return norm_dev
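# --- Added illustration (hedged): not part of the original module. ---
# _ppndf approximates the inverse normal CDF (probit transform), which is what
# puts DET curves on a "normal deviate" scale.  A quick sanity check against
# scipy.stats.norm.ppf (assuming scipy is available); the two should agree to
# several decimal places.
def _example_ppndf_check():
    import numpy as np
    from scipy.stats import norm
    probs = np.array([0.001, 0.05, 0.5, 0.95, 0.999])
    approx = _ppndf(probs)   # NIST polynomial approximation above
    exact = norm.ppf(probs)  # reference inverse normal CDF
    return approx, exact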
def plot_detection_curve(x, y, curve, xlims=None, ylims=None,
ax=None, labels=None, legend=True,
title=None, linewidth=1.2, pointsize=8.0):
"""
Parameters
----------
x: array, or list|tuple of array
if list or tuple of array is given, plot multiple curves at once
y: array, or list|tuple of array
if list or tuple of array is given, plot multiple curves at once
curve: {'det', 'roc', 'prc'}
det: detection error trade-off
roc: receiver operating curve
prc: precision-recall curve
xlims : (xmin, xmax) in float
for DET, `xlims` should be in [0, 1]
ylims : (ymin, ymax) in float
for DET, `ylims` should be in [0, 1]
labels: {list of str}
        labels in case plotting multiple curves
Note
----
    for 'det': xaxis is FPR - Pfa, and yaxis is FNR - Pmiss
    for 'roc': xaxis is FPR - Pfa, and yaxis is TPR
    for 'prc': xaxis is Recall, and yaxis is Precision
"""
from matplotlib import pyplot as plt
from odin import backend as K
from odin.utils import as_tuple
# ====== preprocessing ====== #
if not isinstance(x, (tuple, list)):
x = (x,)
if not isinstance(y, (tuple, list)):
y = (y,)
if len(x) != len(y):
raise ValueError("Given %d series for `x`, but only get %d series for `y`."
% (len(x), len(y)))
if not isinstance(labels, (tuple, list)):
labels = (labels,)
labels = as_tuple(labels, N=len(x))
linewidth = float(linewidth)
# ====== const ====== #
eps = np.finfo(x[0].dtype).eps
xticks, xticklabels = None, None
yticks, yticklabels = None, None
xlabel, ylabel = None, None
lines = []
points = []
# ====== check input arguments ====== #
curve = curve.lower()
if curve not in ('det', 'roc', 'prc'):
raise ValueError("`curve` can only be: 'det', 'roc', or 'prc'")
if ax is None:
ax = plt.gca()
# ====== select DET curve style ====== #
if curve == 'det':
xlabel = "False Alarm probability (in %)"
ylabel = "Miss probability (in %)"
# 0.00001, 0.00002,
# , 0.99995, 0.99998, 0.99999
xticks = np.array([
0.00005, 0.0001, 0.0002, 0.0005,
0.001, 0.002, 0.005, 0.01, 0.02, 0.05,
0.1, 0.2, 0.4, 0.6, 0.8, 0.9,
0.95, 0.98, 0.99, 0.995, 0.998, 0.999,
0.9995, 0.9998, 0.9999])
        xticklabels = [str(i)[:-2] if str(i).endswith('.0') else str(i)
                       for i in xticks * 100]
if xlims is None:
xlims = (max(min(np.min(i) for i in x), xticks[0]),
min(max(np.max(i) for i in x), xticks[-1]))
xlims = ([val for i, val in enumerate(xticks) if val <= xlims[0] or i == 0][-1] + eps,
[val for i, val in enumerate(xticks) if val >= xlims[1] or i == len(xticks) - 1][0] - eps)
if ylims is None:
ylims = (max(min(np.min(i) for i in y), xticks[0]),
min(max(np.max(i) for i in y), xticks[-1]))
ylims = ([val for i, val in enumerate(xticks) if val <= ylims[0] or i == 0][-1] + eps,
[val for i, val in enumerate(xticks) if val >= ylims[1] or i == len(xticks) - 1][0] - eps)
# convert to log scale
xticks = _ppndf(xticks)
yticks, yticklabels = xticks, xticklabels
xlims, ylims = _ppndf(xlims), _ppndf(ylims)
# main line
# TODO: add EER value later
        # note: parameter order matches the call below (name, eer, dcf)
        name_fmt = lambda name, eer, dcf: ('EER=%.2f;minDCF=%.2f' % (eer * 100, dcf * 100)) \
            if name is None else \
            ('%s (EER=%.2f;minDCF=%.2f)' % (name, eer * 100, dcf * 100))
labels_new = []
for count, (Pfa, Pmiss, name) in enumerate(zip(x, y, labels)):
eer = K.metrics.compute_EER(Pfa=Pfa, Pmiss=Pmiss)
# DCF point
dcf, Pfa_opt, Pmiss_opt = K.metrics.compute_minDCF(Pfa=Pfa, Pmiss=Pmiss)
Pfa_opt = _ppndf((Pfa_opt,))
Pmiss_opt = _ppndf((Pmiss_opt,))
points.append(((Pfa_opt, Pmiss_opt),
{'s': pointsize}))
# det curve
Pfa = _ppndf(Pfa)
Pmiss = _ppndf(Pmiss)
name = name_fmt(name, eer, dcf)
lines.append(((Pfa, Pmiss),
{'lw': linewidth, 'label': name,
'linestyle': '-' if count % 2 == 0 else '-.'}))
labels_new.append(name)
labels = labels_new
# ====== select ROC curve style ====== #
elif curve == 'roc':
xlabel = "False Positive probability"
ylabel = "True Positive probability"
xlims = (0, 1)
ylims = (0, 1)
# roc
name_fmt = lambda name, auc: ('AUC=%.2f' % auc) if name is None else \
('%s (AUC=%.2f)' % (name, auc))
labels_new = []
for count, (i, j, name) in enumerate(zip(x, y, labels)):
auc = K.metrics.compute_AUC(i, j)
name = name_fmt(name, auc)
lines.append([(i, j),
{'lw': linewidth, 'label': name,
'linestyle': '-' if count % 2 == 0 else '-.'}])
labels_new.append(name)
labels = labels_new
# diagonal
lines.append([(xlims, ylims),
{'lw': 0.8, 'linestyle': '-.', 'color': 'black'}])
    # ====== select PRC curve style ====== #
elif curve == 'prc':
raise NotImplementedError
# ====== ploting ====== #
fontsize = 9
if xticks is not None:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels, rotation=-60, fontsize=fontsize)
if yticks is not None:
ax.set_yticks(yticks)
if yticklabels is not None:
ax.set_yticklabels(yticklabels, fontsize=fontsize)
# axes labels
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
# plot all lines
for args, kwargs in lines:
ax.plot(*args, **kwargs)
# plot all points
for arg, kwargs in points:
ax.scatter(*arg, **kwargs)
if xlims is not None:
ax.set_xlim(xlims)
if ylims is not None:
ax.set_ylim(ylims)
ax.grid(color='black', linestyle='--', linewidth=0.4)
if title is not None:
ax.set_title(title, fontsize=fontsize + 2)
# legend
if legend and any(i is not None for i in labels):
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
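# --- Added illustration (hedged): not part of the original module. ---
# Sketch of plotting a single DET curve from miss / false-alarm rates computed
# elsewhere (e.g. by sweeping a decision threshold).  `Pfa` and `Pmiss` are
# assumed to be 1-D float arrays of probabilities in [0, 1]; the odin backend
# used internally for EER/minDCF must be installed.
def _example_detection_curve(Pfa, Pmiss):
    from matplotlib import pyplot as plt
    ax = plt.gca()
    plot_detection_curve(Pfa, Pmiss, curve='det', ax=ax,
                         labels='my system', title='DET curve')
    plt.show()
    return ax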
# ===========================================================================
# Micro-control
# ===========================================================================
def plot_colorbar(colormap, vmin=0, vmax=1,
ax=None, orientation='vertical',
tick_location=None, tick_labels=None,
label=None):
"""
Parameters
----------
colormap : string, ColorMap type
vmin : float
vmax : float
ax : {None, matplotlib.figure.Figure or matplotlib.axes.Axes}
if `Figure` is given, show the color bar in the right hand side or
top side of the figure based on the `orientation`
orientation : {'vertical', 'horizontal'}
    tick_location : array-like or None
        positions of the ticks on the colorbar
    tick_labels : list of str or None
        labels drawn at the tick positions
    label : str or None
        text label for the colorbar
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
if isinstance(colormap, string_types):
cmap = mpl.cm.get_cmap(name=colormap)
else:
cmap = colormap
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# ====== add colorbar for the whole figure ====== #
if ax is None or isinstance(ax, mpl.figure.Figure):
fig = plt.gcf() if ax is None else ax
if orientation == 'vertical':
cbar_ax = fig.add_axes([0.92, 0.15, 0.02, 0.7])
else:
cbar_ax = fig.add_axes([0.15, 0.92, 0.7, 0.02])
cb1 = mpl.colorbar.ColorbarBase(cbar_ax, cmap=cmap,
norm=norm,
orientation=orientation)
# ====== add colorbar for only 1 Axes ====== #
elif isinstance(ax, mpl.axes.Axes):
mappable = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        mappable.set_array([])  # colorbar() requires the mappable to have an array set
cb1 = plt.colorbar(mappable, ax=ax,
pad=0.03 if orientation == 'vertical' else 0.1,
shrink=0.7, aspect=25)
    # ====== unsupported `ax` type ====== #
else:
raise ValueError("No support for `ax` type: %s" % str(type(ax)))
# ====== final configuration ====== #
if tick_location is not None:
cb1.set_ticks(tick_location)
if tick_labels is not None:
cb1.set_ticklabels(tick_labels)
if label is not None:
cb1.set_label(str(label))
return cb1
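# --- Added illustration (hedged): not part of the original module. ---
# Minimal sketch: attach a standalone vertical colorbar to a single Axes.
# It relies on the module-level imports of the original file (e.g.
# string_types) and uses the built-in 'Blues' matplotlib colormap.
def _example_plot_colorbar():
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    cb = plot_colorbar('Blues', vmin=0., vmax=10., ax=ax,
                       orientation='vertical', label='score')
    plt.show()
    return cb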
# ===========================================================================
# Shortcut
# ===========================================================================
def plot_close():
from matplotlib import pyplot as plt
plt.close('all')
def plot_save(path='/tmp/tmp.pdf', figs=None, dpi=180,
tight_plot=False, clear_all=True, log=False):
"""
Parameters
----------
clear_all: bool
if True, remove all saved figures from current figure list
in matplotlib
"""
import matplotlib.pyplot as plt
if tight_plot:
plt.tight_layout()
if os.path.exists(path) and os.path.isfile(path):
os.remove(path)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
# ====== saving PDF file ====== #
if '.pdf' in path.lower():
saved_path = [path]
try:
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages(path)
for fig in figs:
fig.savefig(pp, dpi=dpi, format='pdf', bbox_inches="tight")
pp.close()
except Exception as e:
sys.stderr.write('Cannot save figures to pdf, error:%s \n' % str(e))
# ====== saving PNG file ====== #
else:
saved_path = []
path = os.path.splitext(path)
ext = path[-1][1:].lower()
path = path[0]
kwargs = dict(dpi=dpi, bbox_inches="tight")
for idx, fig in enumerate(figs):
if len(figs) > 1:
out_path = path + ('.%d.' % idx) + ext
else:
out_path = path + '.' + ext
fig.savefig(out_path, **kwargs)
saved_path.append(out_path)
# ====== clean ====== #
if log:
sys.stderr.write('Saved figures to:%s \n' % ', '.join(saved_path))
if clear_all:
plt.close('all')
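# --- Added illustration (hedged): not part of the original module. ---
# Sketch of saving all currently open figures into a single multi-page PDF;
# the output path is arbitrary.
def _example_plot_save():
    from matplotlib import pyplot as plt
    plt.figure(); plt.plot([1, 2, 3])
    plt.figure(); plt.hist([1, 1, 2, 3, 3, 3])
    # every open figure becomes one page of the PDF; figures are closed after
    plot_save('/tmp/all_figures.pdf', dpi=120, clear_all=True, log=True)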
def plot_save_show(path, figs=None, dpi=180, tight_plot=False,
clear_all=True, log=True):
plot_save(path, figs, dpi, tight_plot, clear_all, log)
os.system('open -a /Applications/Preview.app %s' % path)
| mit |
johnwu93/find_best_mall | recomendation system/logistic_reg.py | 3 | 5204 | import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import linear_model #used for logistic regression
import filter_demo_data
import one_class
import recsys
import time
class logistic_reg(recsys.recsys):
    # Baseline for recommendations.
    # This will perform extremely badly with RMSE as a score evaluator, but maybe not with MAP.
def __init__(self,X, feature_helper = None, score_helper = None, user_feat = None, \
sparseness = 1):
self.X = X
self.feature_helper = feature_helper
self.score_helper = score_helper
self.user_feat = user_feat
self.sparseness = sparseness
pass
#get the necessary helper functions to do an analysis. may require more parameters for derived classes
    def get_helpers(self, feature_func=None, similarity_func=None):
        if (feature_func is not None) or (self.feature_helper is None):
            self.feature_helper = feature_func
        if (similarity_func is not None) or (self.similarity_helper is None):
            self.similarity_helper = similarity_func
def predict_for_user(self, user_ratings, user_feat, k, feature_transform_all =None):
#Will develop later.
#save the coefficients of the linear regression into a matrix
pass
def transform_training(self, train_indices, test_indices):
        # train_indices must be a |Train_Data|-by-2 matrix.
#train_indices come in tuples
self.X_train = np.copy(self.X);
if((test_indices is None) and (train_indices is None) ):
return
elif(not (test_indices is None)):
self.X_train[test_indices[:, 0], test_indices[:, 1]] = np.zeros((1, test_indices.shape[0]))
return
else:
            # create a binary indicator matrix marking entries outside the training set, then zero them out
Nitems, Nusers = self.X.shape
test_indicator = np.ones((Nitems, Nusers))
test_indicator[train_indices[:, 0], train_indices[:, 1]] = np.zeros((1, train_indices.shape[0]))
self.X_train[test_indicator == 1] = 0
def fit(self, train_indices = "None", test_indices = "None"):
#super(logistic_reg, self).transform_training(train_indices, test_indices)
t = time.time()
self.X_train = self.X
Nitems, Nusers = self.X_train.shape
print(self.X_train.shape)
print(self.user_feat.shape)
#feature transformation
if self.feature_helper == None:
self.feature_transform = self.user_feat
else:
self.feature_transform = self.feature_helper(X=self.X_train, feat = self.user_feat)
self.X_predict = np.zeros( (Nitems, Nusers))
for i in np.arange(Nitems): #if this loop can be parallelized, that will be awesome :)
#in the future, use vector binary classifier
print(i)
mall_in_training = train_indices[ train_indices[:, 0] == i, 1] #This gets the malls that are in training for the ith store and makes a prediction off of that
y_log = self.X_train[i, mall_in_training]
X_log = self.feature_transform[mall_in_training, :]
#L2 logistic regression
            logreg = linear_model.LogisticRegression(C=self.sparseness)  # L2-regularized logistic regression
#print(np.sum(y_log))
logreg.fit(X_log, y_log)
probability_scores = logreg.predict_proba(self.feature_transform)
#verify probability scores for 0 and 1
self.X_predict[i, :] = probability_scores[:, 1]
            # Save each predictor, logreg, in a list for prediction later.
            # logreg.coef_ gives you the coefficients of the regression
print( time.time() - t)
return self.X_predict
    # in reality, this would not be used a lot
def predict(self, indices):
if(not isinstance(indices, np.ndarray)):
raise Exception("your indices have to be an ndarray")
        return self.X_predict[indices[:, 0], indices[:, 1]]
def score(self, truth_index):
if(not isinstance(truth_index, np.ndarray)):
raise Exception("your testing indices have to be an ndarray")
return self.score_helper(self.X, self.X_predict, truth_index)
#do ranked precision
#first
def get_helper2(self, name, function):
if(name == 'feature_helper'):
self.feature_helper = function
return
if(name == 'similarity_helper'):
self.similarity_helper = function
return
if(name == 'score_helper'):
self.score_helper = function
return
else:
raise Exception("Cannot find feature function corresponding to the input name")
#for testing use category data
X, category = filter_demo_data.get_X()
print(X.shape)
print(category.shape)
X = X[:, :]
category = category[:, :]
model = logistic_reg(X, user_feat=category)
initializer = one_class.one_class(learner=model)
t = time.time()
train, test = initializer.train_test_split_equal_item(X, .1)  # use something else. The train test split gets ones sometimes
print(time.time() - t )
train = train.astype(int)
test = test.astype(int)
model.fit(train, test)
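# --- Added illustration (hedged): not part of the original script. ---
# The core idea of logistic_reg.fit for a single item (store), stripped of the
# class machinery: fit an L2-regularized logistic regression on the features of
# the malls observed in training, then score every mall.  The argument names
# below are made up for this sketch.
def _example_single_item_scores(x_item, mall_features, train_mall_idx, C=1.0):
    y_train = np.asarray(x_item)[train_mall_idx]          # 0/1 presence in the training malls
    X_log = np.asarray(mall_features)[train_mall_idx, :]  # features of those malls
    logreg = linear_model.LogisticRegression(C=C)         # L2-regularized by default
    logreg.fit(X_log, y_train)
    # probability that this item "belongs" in each mall (column 1 = class 1)
    return logreg.predict_proba(mall_features)[:, 1]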
| mit |
gregcaporaso/scikit-bio | skbio/stats/distance/_permanova.py | 7 | 5737 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from functools import partial
import numpy as np
from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results)
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def permanova(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using PERMANOVA.
Permutational Multivariate Analysis of Variance (PERMANOVA) is a
non-parametric method that tests whether two or more groups of objects
(e.g., samples) are significantly different based on a categorical factor.
It is conceptually similar to ANOVA except that it operates on a distance
matrix, which allows for multivariate analysis. PERMANOVA computes a
pseudo-F statistic.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). A pseudo-F statistic is computed for each
permutation and the p-value is the proportion of permuted pseudo-F
statisics that are equal to or greater than the original (unpermuted)
pseudo-F statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
anosim
Notes
-----
See [1]_ for the original method reference, as well as ``vegan::adonis``,
available in R's vegan package [2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Anderson, Marti J. "A new method for non-parametric multivariate
analysis of variance." Austral Ecology 26.1 (2001): 32-46.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
See :mod:`skbio.stats.distance.anosim` for usage examples (both functions
provide similar interfaces).
"""
sample_size, num_groups, grouping, tri_idxs, distances = _preprocess_input(
distance_matrix, grouping, column)
# Calculate number of objects in each group.
group_sizes = np.bincount(grouping)
s_T = (distances ** 2).sum() / sample_size
test_stat_function = partial(_compute_f_stat, sample_size, num_groups,
tri_idxs, distances, group_sizes, s_T)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('PERMANOVA', 'pseudo-F', sample_size, num_groups,
stat, p_value, permutations)
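# --- Added usage sketch (hedged): not part of the original module. ---
# It assumes the public scikit-bio API (skbio.DistanceMatrix) and uses a tiny
# made-up symmetric distance matrix with two groups of two samples.
def _example_permanova():
    from skbio import DistanceMatrix
    dm = DistanceMatrix(
        np.array([[0.0, 0.2, 0.7, 0.8],
                  [0.2, 0.0, 0.6, 0.9],
                  [0.7, 0.6, 0.0, 0.3],
                  [0.8, 0.9, 0.3, 0.0]]),
        ids=['s1', 's2', 's3', 's4'])
    grouping = ['A', 'A', 'B', 'B']
    # returns a pandas.Series with the pseudo-F test statistic and p-value
    return permanova(dm, grouping, permutations=99)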
def _compute_f_stat(sample_size, num_groups, tri_idxs, distances, group_sizes,
s_T, grouping):
"""Compute PERMANOVA pseudo-F statistic."""
    # Create a matrix where objects in the same group are marked with the group
    # index (e.g. 0, 1, 2, etc.).  Objects that are not in the same group are
    # marked with -1.
grouping_matrix = -1 * np.ones((sample_size, sample_size), dtype=int)
for group_idx in range(num_groups):
within_indices = _index_combinations(
np.where(grouping == group_idx)[0])
grouping_matrix[within_indices] = group_idx
# Extract upper triangle (in same order as distances were extracted
# from full distance matrix).
grouping_tri = grouping_matrix[tri_idxs]
# Calculate s_W for each group, accounting for different group sizes.
s_W = 0
for i in range(num_groups):
s_W += (distances[grouping_tri == i] ** 2).sum() / group_sizes[i]
s_A = s_T - s_W
return (s_A / (num_groups - 1)) / (s_W / (sample_size - num_groups))
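# Added note (not in the original source): the value returned above is the
# usual PERMANOVA pseudo-F statistic,
#
#     F = (s_A / (a - 1)) / (s_W / (N - a))
#
# where N is the number of objects, a the number of groups, s_T is the sum of
# all squared pairwise distances divided by N, s_W sums each group's squared
# within-group distances divided by that group's size, and s_A = s_T - s_W.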
def _index_combinations(indices):
# Modified from http://stackoverflow.com/a/11144716
return np.tile(indices, len(indices)), np.repeat(indices, len(indices))
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 23 | 27579 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
    # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight of type dict is
    # provided for a multiclass problem. However, it can handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(
n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a smaller loss
# than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a smaller loss
# than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
| bsd-3-clause |
aabadie/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/utils/validation.py | 9 | 25126 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
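# --- Added illustration (hedged): not part of the original module. ---
# Integer input is upcast to a float dtype (int32 -> float32, other ints ->
# float64); float32/float64 input is passed through unless copy=True.
def _example_as_float_array():
    ints = np.array([1, 2, 3], dtype=np.int64)
    floats = as_float_array(ints)                # dtype becomes np.float64
    same = as_float_array(floats, copy=False)    # already float: returned as-is
    return floats.dtype, same is floats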
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
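    Examples
    --------
    Minimal illustrative example (``None`` entries pass straight through):
    >>> x, y = indexable([0, 1, 2], None)
    >>> (len(x), y is None)
    (3, True)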
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
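    Examples
    --------
    Illustrative sketch of the format conversion described above:
    >>> import numpy as np
    >>> from scipy import sparse
    >>> coo = sparse.coo_matrix(np.eye(2))
    >>> _ensure_sparse_format(coo, ['csr'], None, False, True).format
    'csr'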
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
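    Examples
    --------
    Minimal illustrative examples of the conversions described above:
    >>> check_array([[1, 2], [3, 4]]).shape
    (2, 2)
    >>> check_array([[1.], [2.]], ensure_min_samples=2).shape
    (2, 1)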
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning)
return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
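    Examples
    --------
    Minimal illustrative example of the joint validation described above:
    >>> import numpy as np
    >>> X, y = check_X_y(np.arange(6).reshape(3, 2), [0, 1, 0])
    >>> X.shape, y.shape
    ((3, 2), (3,))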
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
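    Examples
    --------
    Illustrative example of the ravelling described above:
    >>> import numpy as np
    >>> column_or_1d(np.array([[1], [2], [3]]))
    array([1, 2, 3])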
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
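    Examples
    --------
    Minimal illustrative examples of the accepted kinds of seed:
    >>> import numpy as np
    >>> check_random_state(None) is np.random.mtrand._rand
    True
    >>> 0 <= check_random_state(42).randint(10) < 10
    True
    >>> rs = np.random.RandomState(0)
    >>> check_random_state(rs) is rs
    True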
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
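    Examples
    --------
    Illustrative sketch of the symmetrization described above:
    >>> import numpy as np
    >>> a = np.array([[0., 2.], [0., 0.]])
    >>> sym = check_symmetric(a, raise_warning=False)
    >>> np.allclose(sym, sym.T)
    True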
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
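    Examples
    --------
    Minimal sketch with a toy estimator (the ``Toy`` class below is purely
    illustrative and not part of the library):
    >>> class Toy(object):
    ...     def fit(self):
    ...         self.coef_ = 1.0
    ...         return self
    >>> check_is_fitted(Toy().fit(), 'coef_')   # no exception once fitted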
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
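    Examples
    --------
    Illustrative example; a negative entry would raise a ``ValueError``:
    >>> import numpy as np
    >>> check_non_negative(np.array([[0., 1.], [2., 3.]]), "doctest")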
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| gpl-2.0 |
treycausey/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
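                # classic perceptron update: when a sample is misclassified,
                # move the weights towards y_i * x_i and shift the bias by y_i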
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/datasets/tests/test_base.py | 5 | 8862 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| mit |
pnedunuri/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
atantet/ergoPack | example/numericalFP/vsBetaImag.py | 1 | 5387 | import os
import numpy as np
import matplotlib.pyplot as plt
import ergoPlot
# Get model
model = 'Hopf'
gam = 1.
#betaRng = np.arange(0., 2.05, 0.05)
betaRng = np.arange(0., 1.85, 0.05)
betaRngPlot = np.array([0., 0.25, 0.5])
mu = 0.
eps = 1.
# Grid definition
dim = 2
nx0 = 200
nSTD = 5
# Number of eigenvalues
nev = 21
#nev = 201
nevPlot = 7
# Directories
print 'For eps = ', eps
print 'For mu = ', mu
resDir = '../results/numericalFP/%s' % model
plotDir = '../results/plot/numericalFP/%s' % model
os.system('mkdir %s 2> /dev/null' % plotDir)
mu += 1.e-8
if mu < 0:
signMu = 'm'
else:
signMu = 'p'
plotPostfix = '_%s_mu%s%02d_eps%03d_nx%d_nSTD%d_nev%d' \
% (model, signMu, int(round(np.abs(mu) * 10)),
int(round(eps * 100)), nx0, nSTD, nev)
print 'Plot postfix = ', plotPostfix
eigValBeta = np.empty((betaRng.shape[0], nev), dtype=complex)
eigVecBeta = np.empty((betaRng.shape[0], nx0**dim, nev), dtype=complex)
for ibeta in np.arange(betaRng.shape[0]):
beta = betaRng[ibeta]
beta += 1.e-8
if beta < 0:
signBeta = 'm'
else:
signBeta = 'p'
postfix = '_%s_mu%s%02d_beta%s%03d_eps%03d_nx%d_nSTD%d_nev%d' \
% (model, signMu, int(round(np.abs(mu) * 10)),
signBeta, int(round(np.abs(beta) * 100)), int(round(eps * 100)),
nx0, nSTD, nev)
print 'For beta = ', beta
# Read eigenvalues
eigVal = np.empty((nev,), dtype=complex)
ergoPlot.loadtxt_complex('%s/eigValBackward%s.txt' \
% (resDir, postfix), eigVal)
isort = np.argsort(-eigVal.real)
eigVal = eigVal[isort]
eigValBeta[ibeta] = eigVal
markersize = 6
markeredgewidth = 1
lw = 2
colors1 = plt.rcParams['axes.prop_cycle'].by_key()['color']
colors = np.empty((len(colors1)*2,), dtype='|S1')
colors[::2] = colors1
colors[1::2] = colors1
xplot = np.linspace(0., betaRng[-1], 1000)
XP = np.ones((xplot.shape[0], 2))
XP[:, 1] = xplot
XP = np.matrix(XP)
# Imaginary part of leading eigenvalues vs. beta
fig = plt.figure()
ax = fig.add_subplot(111)
ev = 1
evImagMax = []
ordinate = []
steepness = []
evRank = []
flag = False
colorCount = 0
for iev in np.arange(nevPlot):
if np.abs(eigValBeta[0, ev].imag) < 1.e-6:
ev += 1
else:
# if eigValBeta[:, ev].imag > 0:
if not flag:
ab = np.abs(eigValBeta[:, ev].imag)
diff = ab[1:] - ab[:-1]
eigVal = ab.copy()
eigVal[np.concatenate(([False], diff > 0))] *= -1
flag = True
else:
ab = -np.abs(eigValBeta[:, ev].imag)
diff = ab[1:] - ab[:-1]
eigVal = ab.copy()
eigVal[np.concatenate(([False], diff < 0))] *= -1
flag = False
evImagMax.append(np.max(np.abs(eigVal)))
# Linear regression
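        # ordinary least squares fit of Im(lambda) against beta via the
        # normal equations: A = (X^T X)^{-1} X^T B, i.e. A = [intercept, slope]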
nreg = betaRng.shape[0]
X = np.ones((nreg, 2))
X[:, 1] = betaRng[:nreg]
X = np.matrix(X)
B = np.matrix(eigVal[:nreg]).T
A = (X.T * X)**(-1) * (X.T * B)
Y = X * A
Stot = np.var(B)
Sres = np.sum((np.array(Y)[:, 0] - B)**2) / nreg
ordinate.append(A[0, 0])
steepness.append(A[1, 0])
evRank.append(ev)
ax.plot(betaRng, eigVal, 'x%s' % colors[colorCount],
markersize=markersize, markeredgewidth=markeredgewidth,
label=r'$\Im(\lambda_{%d})$' % (ev,))
ax.plot(xplot, np.array(XP*A)[:, 0], '-%s' % colors[colorCount], linewidth=lw,
            label=r'$\Im(\widehat{\lambda}_%d) = %.2f + %.2f \beta$' \
% (ev, A[0, 0], A[1, 0]))
ev += 1
colorCount += 1
#plt.legend(fontsize=ergoPlot.fs_default, loc='lower left')
xlim = [betaRng[0], betaRng[-1]]
ymax = np.max(np.abs(evImagMax))
ylim = [-ymax, ymax]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# Add text labels
aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0]) \
* fig.get_figheight() / fig.get_figwidth()
for iev in np.arange(len(evRank)):
rotation = np.arctan(steepness[iev] * aspect) * 180./np.pi
corr = 0.4
xcorr = -corr * np.sin(rotation*np.pi/180.)
ycorr = corr / np.cos(rotation*np.pi/180.)
xFitTxt = betaRng[int(betaRng.shape[0] * 0.82)] + xcorr
yFitTxt = ordinate[iev] + xFitTxt * steepness[iev] + ycorr
if steepness[iev] >= 0:
sgn = '+'
else:
sgn = '-'
fitTxt = r'$\Im(\widehat{ \lambda}_{%d}) = %.2f %s %.2f \beta$' \
% (evRank[iev], ordinate[iev], sgn, np.abs(steepness[iev]))
ax.text(xFitTxt, yFitTxt, fitTxt, fontsize='x-large',
color=colors[iev], rotation=rotation,
ha='center', va='center')
# Parameter labels
ax.text(xlim[0] + (xlim[1] - xlim[0]) * 0.03,
ylim[1] + (ylim[1] - ylim[0]) * 0.02,
r'$\mu = %.1f$' % mu, fontsize='xx-large')
ax.text(xlim[0] + (xlim[1] - xlim[0]) * 0.82,
ylim[1] + (ylim[1] - ylim[0]) * 0.02,
r'$\epsilon = %.1f$' % eps, fontsize='xx-large')
# Axes
ax.set_xlabel(r'$\beta$', fontsize=ergoPlot.fs_latex)
ax.set_ylabel(r'$\Im(\lambda)$', fontsize=ergoPlot.fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=ergoPlot.fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=ergoPlot.fs_yticklabels)
fig.savefig('%s/eigValImagVSbeta%s.%s' \
% (plotDir, plotPostfix, ergoPlot.figFormat),
bbox_inches=ergoPlot.bbox_inches, dpi=ergoPlot.dpi)
| gpl-3.0 |
mkukielka/oddt | oddt/pandas.py | 2 | 19996 | """ Pandas extension for chemical analysis """
from __future__ import absolute_import
from collections import deque
from six import BytesIO, StringIO, text_type
import pandas as pd
import oddt
pd.set_option("display.max_colwidth", 999999)
def _mol_reader(fmt='sdf',
filepath_or_buffer=None,
usecols=None,
molecule_column='mol',
molecule_name_column='mol_name',
smiles_column=None,
skip_bad_mols=False,
chunksize=None,
**kwargs):
"""Universal reading function for private use.
.. versionadded:: 0.3
Parameters
----------
fmt : string
The format of molecular file
filepath_or_buffer : string or None
File path
usecols : list or None, optional (default=None)
A list of columns to read from file. If None then all available
fields are read.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.
molecule_name_column : string or None, optional (default='mol_name')
Column name which will contain molecules' title/name. Column is
skipped when set to None.
smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES; by default it is disabled.
skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if a molecule cannot be sanitized.
chunksize : int or None, optional (default=None)
Size of chunk to return. If set to None whole set is returned.
Returns
-------
chunk :
        A `ChemDataFrame` containing `chunksize` molecules.
"""
# capture options for reader
reader_kwargs = {}
if 'opt' in kwargs:
reader_kwargs['opt'] = kwargs.pop('opt')
if 'sanitize' in kwargs:
reader_kwargs['sanitize'] = kwargs.pop('sanitize')
    # when you don't read molecules you can skip parsing them
if molecule_column is None:
if oddt.toolkit.backend == 'ob' and fmt == 'sdf':
if 'opt' in reader_kwargs:
reader_kwargs['opt']['P'] = None
else:
reader_kwargs['opt'] = {'P': None}
elif oddt.toolkit.backend == 'rdk':
reader_kwargs['sanitize'] = False
chunk = []
for n, mol in enumerate(oddt.toolkit.readfile(fmt, filepath_or_buffer,
**reader_kwargs)):
if skip_bad_mols and mol is None:
continue # add warning with number of skipped molecules
if usecols is None:
mol_data = mol.data.to_dict()
else:
mol_data = dict((k, mol.data[k]) for k in usecols)
if molecule_column:
mol_data[molecule_column] = mol
if molecule_name_column:
mol_data[molecule_name_column] = mol.title
if smiles_column:
mol_data[smiles_column] = mol.smiles
chunk.append(mol_data)
if chunksize and (n + 1) % chunksize == 0:
chunk_frm = ChemDataFrame(chunk, **kwargs)
chunk_frm._molecule_column = molecule_column
yield chunk_frm
chunk = []
if chunk or chunksize is None:
chunk_frm = ChemDataFrame(chunk, **kwargs)
chunk_frm._molecule_column = molecule_column
yield chunk_frm
def _mol_writer(data,
fmt='sdf',
filepath_or_buffer=None,
update_properties=True,
molecule_column=None,
columns=None):
"""Universal writing function for private use.
.. versionadded:: 0.3
Parameters
----------
fmt : string
The format of molecular file
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
        while writing.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
if filepath_or_buffer is None:
out = StringIO()
elif hasattr(filepath_or_buffer, 'write'):
out = filepath_or_buffer
else:
out = oddt.toolkit.Outputfile(fmt, filepath_or_buffer, overwrite=True)
if isinstance(data, pd.DataFrame):
molecule_column = molecule_column or data._molecule_column
for ix, row in data.iterrows():
mol = row[molecule_column].clone
if update_properties:
new_data = row.to_dict()
del new_data[molecule_column]
mol.data.update(new_data)
if columns:
for k in mol.data.keys():
if k not in columns:
del mol.data[k]
if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
out.write(mol.write(fmt))
else:
out.write(mol)
elif isinstance(data, pd.Series):
for mol in data:
if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
out.write(mol.write(fmt))
else:
out.write(mol)
if filepath_or_buffer is None:
return out.getvalue()
    elif not hasattr(filepath_or_buffer, 'write'):  # don't close foreign buffer
out.close()
def read_csv(*args, **kwargs):
""" TODO: Support Chunks """
smiles_to_molecule = kwargs.pop('smiles_to_molecule', None)
molecule_column = kwargs.pop('molecule_column', 'mol')
data = pd.read_csv(*args, **kwargs)
if smiles_to_molecule is not None:
data[molecule_column] = data[smiles_to_molecule].map(
lambda x: oddt.toolkit.readstring('smi', x))
return data
def read_sdf(filepath_or_buffer=None,
usecols=None,
molecule_column='mol',
molecule_name_column='mol_name',
smiles_column=None,
skip_bad_mols=False,
chunksize=None,
**kwargs):
"""Read SDF/MDL multi molecular file to ChemDataFrame
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
usecols : list or None, optional (default=None)
A list of columns to read from file. If None then all available
fields are read.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.
molecule_name_column : string or None, optional (default='mol_name')
Column name which will contain molecules' title/name. Column is
skipped when set to None.
smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES; by default it is disabled.
skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if a molecule cannot be sanitized.
chunksize : int or None, optional (default=None)
Size of chunk to return. If set to None whole set is returned.
Returns
-------
result :
        A `ChemDataFrame` containing all molecules if `chunksize` is None
        or a generator of `ChemDataFrame` with `chunksize` molecules.
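    Examples
    --------
    Illustrative sketch only -- ``ligands.sdf`` is a hypothetical file name:
    >>> df = read_sdf('ligands.sdf', smiles_column='smiles')  # doctest: +SKIP
    >>> df[['mol_name', 'smiles']].head()  # doctest: +SKIP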
"""
result = _mol_reader(fmt='sdf',
filepath_or_buffer=filepath_or_buffer,
usecols=usecols,
molecule_column=molecule_column,
molecule_name_column=molecule_name_column,
smiles_column=smiles_column,
skip_bad_mols=skip_bad_mols,
chunksize=chunksize,
**kwargs)
if chunksize:
return result
else:
return deque(result, maxlen=1).pop()
def read_mol2(filepath_or_buffer=None,
usecols=None,
molecule_column='mol',
molecule_name_column='mol_name',
smiles_column=None,
skip_bad_mols=False,
chunksize=None,
**kwargs):
"""Read Mol2 multi molecular file to ChemDataFrame. UCSF Dock 6 comments
style is supported, i.e. `#### var_name: value` before molecular block.
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
usecols : list or None, optional (default=None)
A list of columns to read from file. If None then all available
fields are read.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped and
        the reading will be sped up significantly.
molecule_name_column : string or None, optional (default='mol_name')
Column name which will contain molecules' title/name. Column is
skipped when set to None.
smiles_column : string or None, optional (default=None)
        Column name containing molecules' SMILES; by default it is disabled.
skip_bad_mols : bool, optional (default=False)
        Switch to skip empty (bad) molecules. Useful for RDKit, which returns
        None if a molecule cannot be sanitized.
chunksize : int or None, optional (default=None)
Size of chunk to return. If set to None whole set is returned.
Returns
-------
result :
        A `ChemDataFrame` containing all molecules if `chunksize` is None
        or a generator of `ChemDataFrame` with `chunksize` molecules.
"""
result = _mol_reader(fmt='mol2',
filepath_or_buffer=filepath_or_buffer,
usecols=usecols,
molecule_column=molecule_column,
molecule_name_column=molecule_name_column,
smiles_column=smiles_column,
skip_bad_mols=skip_bad_mols,
chunksize=chunksize,
**kwargs)
if chunksize:
return result
else:
return deque(result, maxlen=1).pop()
class ChemSeries(pd.Series):
"""Pandas Series modified to adapt `oddt.toolkit.Molecule` objects and apply
molecular methods easily.
.. versionadded:: 0.3
"""
def __le__(self, other):
""" Substructure searching.
`chemseries < mol`: are molecules in series substructures of a `mol`
"""
if (isinstance(other, oddt.toolkit.Molecule) and
isinstance(self[0], oddt.toolkit.Molecule)):
return self.map(lambda x: oddt.toolkit.Smarts(x.smiles).match(other))
else:
return super(ChemSeries, self).__le__(other)
def __ge__(self, other):
""" Substructure searching.
`chemseries > mol`: is `mol` a substructure of molecules in series
"""
if (isinstance(other, oddt.toolkit.Molecule) and
isinstance(self[0], oddt.toolkit.Molecule)):
smarts = oddt.toolkit.Smarts(other.smiles)
return self.map(lambda x: smarts.match(x))
else:
return super(ChemSeries, self).__ge__(other)
def __or__(self, other):
""" Tanimoto coefficient """
if (isinstance(self[0], oddt.toolkit.Fingerprint) and
isinstance(other, oddt.toolkit.Fingerprint)):
return self.map(lambda x: x | other)
else:
return super(ChemSeries, self).__or__(other)
def calcfp(self, *args, **kwargs):
"""Helper function to map FP calculation throuugh the series"""
assert(isinstance(self[0], oddt.toolkit.Molecule))
return self.map(lambda x: x.calcfp(*args, **kwargs))
def to_smiles(self, filepath_or_buffer=None):
return _mol_writer(self, fmt='smi', filepath_or_buffer=filepath_or_buffer)
def to_sdf(self, filepath_or_buffer=None):
return _mol_writer(self, fmt='sdf', filepath_or_buffer=filepath_or_buffer)
def to_mol2(self, filepath_or_buffer=None):
return _mol_writer(self, fmt='mol2', filepath_or_buffer=filepath_or_buffer)
@property
def _constructor(self):
""" Force new class to be usead as constructor """
return ChemSeries
@property
def _constructor_expanddim(self):
""" Force new class to be usead as constructor when expandig dims """
return ChemDataFrame
class ChemDataFrame(pd.DataFrame):
"""Chemical DataFrame object, which contains molecules column of
`oddt.toolkit.Molecule` objects. Rich display of moleucles (2D) is available
in iPython Notebook. Additional `to_sdf` and `to_mol2` methods make writing
to molecular formats easy.
.. versionadded:: 0.3
Notes
-----
Thanks to: http://blog.snapdragon.cc/2015/05/05/subclass-pandas-dataframe-to-save-custom-attributes/
"""
_metadata = ['_molecule_column']
_molecule_column = None
def to_sdf(self,
filepath_or_buffer=None,
update_properties=True,
molecule_column=None,
columns=None):
"""Write DataFrame to SDF file.
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
        while writing.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
molecule_column = molecule_column or self._molecule_column
return _mol_writer(self,
filepath_or_buffer=filepath_or_buffer,
update_properties=update_properties,
fmt='sdf',
molecule_column=molecule_column,
columns=columns)
def to_mol2(self,
filepath_or_buffer=None,
update_properties=True,
molecule_column='mol',
columns=None):
"""Write DataFrame to Mol2 file.
.. versionadded:: 0.3
Parameters
----------
filepath_or_buffer : string or None
File path
update_properties : bool, optional (default=True)
Switch to update properties from the DataFrames to the molecules
while writting.
molecule_column : string or None, optional (default='mol')
Name of molecule column. If None the molecules will be skipped.
columns : list or None, optional (default=None)
A list of columns to write to file. If None then all available
fields are written.
"""
molecule_column = molecule_column or self._molecule_column
return _mol_writer(self,
fmt='mol2',
filepath_or_buffer=filepath_or_buffer,
update_properties=update_properties,
molecule_column=molecule_column,
columns=columns)
def to_html(self, *args, **kwargs):
"""Patched rendering in HTML - don't escape HTML inside the cells.
Docs are copied from parent
"""
kwargs['escape'] = False
return super(ChemDataFrame, self).to_html(*args, **kwargs)
def to_csv(self, *args, **kwargs):
""" Docs are copied from parent """
if self._molecule_column and ('columns' not in kwargs or
kwargs['columns'] is None or
self._molecule_column in kwargs['columns']):
frm_copy = self.copy(deep=True)
smi = frm_copy[self._molecule_column].map(lambda x: x.smiles)
frm_copy[self._molecule_column] = smi
return super(ChemDataFrame, frm_copy).to_csv(*args, **kwargs)
else:
return super(ChemDataFrame, self).to_csv(*args, **kwargs)
def to_excel(self, *args, **kwargs):
""" Docs are copied from parent """
if 'columns' in kwargs:
columns = kwargs['columns']
else:
columns = self.columns.tolist()
if 'molecule_column' in kwargs:
molecule_column = kwargs['molecule_column']
else:
molecule_column = self._molecule_column
molecule_column_idx = columns.index(molecule_column)
if 'index' not in kwargs or ('index' in kwargs and kwargs['index']):
molecule_column_idx += 1
size = kwargs.pop('size') if 'size' in kwargs else (200, 200)
excel_writer = args[0]
if isinstance(excel_writer, str):
excel_writer = pd.ExcelWriter(excel_writer, engine='xlsxwriter')
assert excel_writer.engine == 'xlsxwriter'
frm_copy = self.copy(deep=True)
smi = frm_copy[molecule_column].map(lambda x: x.smiles)
frm_copy[molecule_column] = smi
super(ChemDataFrame, frm_copy).to_excel(excel_writer, *args[1:], **kwargs)
sheet = excel_writer.sheets['Sheet1'] # TODO: Get appropriate sheet name
sheet.set_column(molecule_column_idx, molecule_column_idx,
width=size[1] / 6.)
for i, mol in enumerate(self[molecule_column]):
if mol is None:
continue
img = BytesIO()
png = mol.clone.write('png', size=size)
if isinstance(png, text_type):
png = png.encode('utf-8', errors='surrogateescape')
img.write(png)
sheet.write_string(i + 1, molecule_column_idx, "")
sheet.insert_image(i + 1,
molecule_column_idx,
'dummy',
{'image_data': img,
'positioning': 2,
'x_offset': 1,
'y_offset': 1})
sheet.set_row(i + 1, height=size[0])
excel_writer.save()
@property
def _constructor(self):
""" Force new class to be usead as constructor """
return ChemDataFrame
@property
def _constructor_sliced(self):
""" Force new class to be usead as constructor when slicing """
return ChemSeries
@property
def _constructor_expanddim(self):
""" Force new class to be usead as constructor when expandig dims """
return ChemPanel
# Copy some docscrings from upstream classes
for method in ['to_html', 'to_csv', 'to_excel']:
try:
getattr(ChemDataFrame, method).__doc__ = getattr(pd.DataFrame, method).__doc__
except AttributeError: # Python 2 compatible
getattr(ChemDataFrame, method).__func__.__doc__ = getattr(pd.DataFrame, method).__func__.__doc__
class ChemPanel(pd.Panel):
"""Modified `pandas.Panel` to adopt higher dimension data than
`ChemDataFrame`. Main purpose is to store molecular fingerprints in one
column and keep 2D numpy array underneath.
.. versionadded:: 0.3
"""
_metadata = ['_molecule_column']
_molecule_column = None
@property
def _constructor(self):
""" Force new class to be usead as constructor """
return ChemPanel
@property
def _constructor_sliced(self):
""" Force new class to be usead as constructor when slicing """
return ChemDataFrame
| bsd-3-clause |
kernc/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
thehackerwithin/berkeley | code_examples/python_regression_classification/notebooks/util/util.py | 1 | 1877 | import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
    This function plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
fig = plt.figure(figsize=(10, 8))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# Plotting decision regions
def plot_desicion_boundary(X, y, clf, title = None):
'''
Helper function to plot the decision boundary for the SVM
'''
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
np.arange(y_min, y_max, 0.01))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure(figsize = (10, 8))
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
if title is not None:
plt.title(title)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show() | bsd-3-clause |
ksopyla/Pandas_Wordbank_GDP | gdp_pandas_worldbank.py | 1 | 3505 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
gdp = pd.read_csv('GDP_Eastern_Europe.csv')
# we take only the data rows, not the additional information
gdp = gdp[0:-5]
#delete empty column
del gdp['2016 [YR2016]']
#replace '..' string with nan values
gdp.replace('..', np.nan, inplace=True)
# some of the columns are objects, we have to convert to floats,
#then pivot_table will take them into consideration
#col_list = ['1990 [YR1990]','1991 [YR1991]','1992 [YR1992]','1993 [YR1993]','1994 [YR1994]','1995 [YR1995]']
col_list = gdp.columns[4:].values
gdp[col_list]=gdp[col_list].apply(pd.to_numeric)
# Reindex the whole table: build a pivot view indexed by Series Name and Country Code
pv2 = pd.pivot_table(gdp,index=['Series Name','Country Code'], dropna=False, fill_value=0.0)
# set the years
pv2.columns= np.arange(1990,2016)
# Get only GPD
pv2.loc['GDP (current US$)'].T
# Get only GPD for Poland
pv2.loc['GDP (current US$)', 'POL']
import seaborn as sns
palette = sns.color_palette("Paired", 10)
#palette = sns.color_palette("colorblind", 10)
sns.set_palette(palette)
pv2.loc['GDP (current US$)'].T.plot(alpha=0.75, rot=45, title="GDP (current US$)")
pv2.loc['GDP per capita (current US$)'].T.plot(alpha=0.75, rot=45, title="GDP per capita (current US$)")
pv2.loc['GDP growth (annual %)'].T.plot(alpha=0.75, rot=45, title="GDP growth (annual %)")
pv2.loc['GDP per capita growth (annual %)'].T.plot(title="GDP per capita growth (annual %)")
#Create regression plots in seaborn
plot_data = pv2.loc['GDP (current US$)'].T.reset_index()
plot_data.rename(columns={'index':'Years'}, inplace=True)
# Unpivot the data: go from the wide table view (one column per country)
# to long-format time series rows of [year, country code, value]
melt_data = pd.melt(plot_data, id_vars=['Years'],var_name='Country')
melt_data.rename(columns={'value':'GDP'}, inplace=True)
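# melt_data is now long-form: one row per (year, country) pair,
# with columns ['Years', 'Country', 'GDP']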
sns.lmplot(x="Years", y="GDP", hue="Country", data=melt_data, palette="Set1");
plt.title('Regression lines for GDP (current US$)')  # sns.plt was removed in seaborn >= 0.9; call pyplot directly
# plot data without western Europe countries
plot_data_without_west = plot_data.copy()
del plot_data_without_west['DEU']
del plot_data_without_west['FRA']
del plot_data_without_west['GBR']
melt_data_without_west = pd.melt(plot_data_without_west, id_vars=['Years'],var_name='Country')
melt_data_without_west.rename(columns={'value':'GDP'}, inplace=True)
sns.lmplot(x="Years", y="GDP", hue="Country", data=melt_data_without_west, palette="Set1");
plt.title('Regression lines for GDP (current US$)')
# draw bar plot for 4 countries
#transpose data
pv2T = pv2.T
f, ax = plt.subplots(figsize=(14, 8))
# colors from: http://xkcd.com/color/rgb/
sns.barplot(x='Years',y='DEU', data=plot_data,label="DEU", color=sns.xkcd_rgb["lime"])
sns.barplot(x='Years',y='POL', data=plot_data,label="POL", color=sns.xkcd_rgb["cyan"])
sns.barplot(x='Years',y='CZE', data=plot_data,label="CZE", color=sns.xkcd_rgb["blue"])
#sns.barplot(x='Years',y='EST', data=plot_data,label="EST", color=sns.xkcd_rgb["lavender"])
sns.barplot(x='Years',y='HUN', data=plot_data,label="HUN", color=sns.xkcd_rgb["burnt orange"])
#sns.barplot(x='Years',y='SVK', data=plot_data,label="SVK", color=sns.xkcd_rgb["peach"])
#sns.barplot(x='Years',y='UKR', data=plot_data,label="UKR", color=sns.xkcd_rgb["mustard"])
# Add a legend and informative axis label
ax.legend(ncol=2, loc="upper left", frameon=True)
ax.set(xlim=(0, 24), xlabel="", ylabel="GDP (current US$)")  # label the value axis rather than the year axis
plt.xticks(rotation=90)
sns.despine(left=True, bottom=True)
plt.show()
| mit |
laurent-george/bokeh | bokeh/charts/builder/tests/test_scatter_builder.py | 33 | 2895 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Scatter
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestScatter(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [2, 3, 7, 5, 26]
y_pypy = [12, 23, 47, 15, 46]
y_jython = [22, 43, 10, 25, 26]
x_python = [1, 3, 4, 5, 8]
x_pypy = [1, 2, 4, 5, 8]
x_jython = [1, 2, 4, 6, 8]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['x_python'], x_python)
assert_array_equal(builder._data['x_jython'], x_jython)
assert_array_equal(builder._data['x_pypy'], x_pypy)
lvalues = [xyvalues['python'], xyvalues['pypy'], xyvalues['jython']]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
assert_array_equal(builder._data['x_0'], x_python)
assert_array_equal(builder._data['x_1'], x_pypy)
assert_array_equal(builder._data['x_2'], x_jython)
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
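# Illustrative usage (comment-only sketch, not executed at import time);
# `X` and `y` below are assumed to be a non-negative feature matrix and
# integer class labels:
#
#     from sklearn.feature_selection import SelectKBest, chi2
#     X_new = SelectKBest(chi2, k=10).fit_transform(X, y)
#
# RFE/RFECV instead wrap an external estimator and recursively prune features
# by importance, e.g. RFE(estimator, n_features_to_select=5).fit(X, y).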
| bsd-3-clause |
annayqho/TheCannon | code/lamost/xcalib_5labels/paper_plots/teff_logg_full.py | 1 | 2221 | import pyfits
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
direc = "/Users/annaho/TheCannon/code/lamost/make_lamost_catalog"
hdulist = pyfits.open("%s/lamost_catalog_full.fits" %direc)
tbdata = hdulist[1].data
hdulist.close()
teff = tbdata.field("cannon_teff")
logg = tbdata.field("cannon_logg")
mh = tbdata.field("cannon_m_h")
snr = tbdata.field("cannon_snrg")
cannon = np.vstack((teff, logg, mh)).T
teff = tbdata.field("teff_1")
logg = tbdata.field("logg_1")
feh = tbdata.field("feh")
lamost = np.vstack((teff, logg, feh)).T  # use the LAMOST [Fe/H] column here (the Cannon m_h was likely a copy-paste slip)
choose = np.logical_and(feh < 0.0, feh > -0.1)
print(sum(choose))
data = [lamost[choose], cannon[choose]]
low = 3600
high = 6000
low2 = 0.0
high2 = 4.5
fig,axarr = plt.subplots(1,2, figsize=(10,5.5), sharex=True, sharey=True)
names = ['Labels from LAMOST DR2', 'Labels from Cannon']
#text = r'-0.1 $\textless$ [Fe/H] $\textless$ 0.0 (44,000 objects)'
for i in range(0, len(names)):
ax = axarr[i]
use = data[i]
im = ax.hist2d(use[:,0], use[:,1], norm=LogNorm(), bins=100,
cmap="inferno", range=[[low,high],[low2,high2]], vmin=1,vmax=70)
ax.set_xlabel(r"$\mbox{T}_{\mbox{eff}}$" + " [K]", fontsize=16)
if i == 0:
ax.set_ylabel("log g [dex]", fontsize=16)
ax.set_title("%s" %names[i], fontsize=16)
ax.set_xlim(low,high)
ax.set_ylim(low2,high2)
ax.tick_params(axis='x', labelsize=16)
ax.locator_params(nbins=5)
#if i == 2: fig.colorbar(im[3], cax=ax, label="log(Number of Objects)")
#plt.savefig("rc_%s.png" %names)
#plt.close()
#props = dict(boxstyle='round', facecolor='white')
# axarr[0].text(
# 0.5, 0.90, text, horizontalalignment='left',
# verticalalignment='top', transform=axarr[0].transAxes, bbox=props,
# fontsize=16)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.1, 0.02, 0.8])
cbar = plt.colorbar(im[3], cax=cbar_ax)
cbar.set_label("log(density)", size=16)
cbar.ax.tick_params(labelsize=16)
#plt.show()
plt.savefig("teff_logg_test_set.png")
| mit |
hyqneuron/pylearn2-maxsom | pylearn2/sandbox/cuda_convnet/specialized_bench.py | 44 | 3906 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
filters2 = shared(base_filters_value, name='filters')
layer_2_detector = FilterActs()(images, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
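# Note: layer_1_pooled_fake and base_filters2_value above never enter the
# compiled graphs -- both timed functions effectively perform a single
# convolution of `images` with the (channels, filter_rows, filter_cols,
# num_filters) filter bank, via FilterActs and conv2d respectively.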
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
| bsd-3-clause |
rnelsonchem/gcmstools | gcmstools/isotope.py | 1 | 9463 | import numpy as np
import pandas as pd
from gcmstools.datastore import GcmsStore
class Isotope(object):
'''An isotope analysis class.
Parameters
----------
h5file : str or GcmsStore instance
This is the HDF file that contains all of the processed GCMS data.
datafile : str
The file name of the particular GCMS data file that contains the
isotopic data to be processed.
name : str
The name to give to this particular analysis. The isotopic data will
be saved into the HDF file under this name.
start : float
The starting elution time for the analysis.
stop : float (Default None)
The ending elution time for the analysis. If `None`, then only a
single MS slice at the indicated start time above will be used.
Otherwise, all MS between the start and stop time will be averaged.
rmbkg : bool (default False)
Do a simple background removal of the analysis region. This simply
subtracts the first MS slice from all the other MS data in the
analysis region.
'''
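    # Illustrative workflow (a hedged sketch; the file names, compound name,
    # and m/z values below are hypothetical, not from the original package):
    #
    #     iso = Isotope('project.h5', 'sample_run.CDF', 'run1_labeling',
    #                   start=10.2, stop=10.6, rmbkg=True)
    #     iso.addref('unlabeled_ref.CDF', 'cpd', refmin=120, refmax=126,
    #                numiso=6, basemz=120)
    #     iso.fit()
    #     iso.save()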
def __init__(self, h5file, datafile, name, start, stop=None, rmbkg=False):
if isinstance(h5file, str):
self.h5 = GcmsStore(h5file)
else:
self.h5 = h5file
self.data = self.h5.extract_gcms(datafile)
self.name = name
self.datafile = datafile
self.start = start
self.stop = stop
self.rmbkg = rmbkg
self.refs = {}
self.datams = self._ms_select(self.data)
self._dmask = np.zeros(self.data.masses.size, dtype=bool)
def addref(self, reffile, refname, refmin, refmax, numiso, basemz,
startmz=None):
'''Add a reference spectrum to the analysis.
Parameters
----------
reffile : str
Reference file name.
refname : str
Name of reference compound.
refmin : int
The minimum m/z to select as a reference from this file.
refmax : int
The maximum m/z to select as a reference from this file.
numiso : int
The number of isotopologues to fit with this particular reference.
basemz : int
The starting m/z value in the main data file that this reference
will be used to fit. This can be different than refmin,
especially if a true reference is contaminated.
startmz : int (default None)
The starting m/z value to fit this particular reference. This can
only be equal to or larger than basemz. It may be necessary to
make this larger if the first m/z value is contaminated with other
compounds.
'''
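        # Hypothetical example: with basemz=120, startmz=122 and numiso=6, the
        # reference pattern is slid across m/z 122, 123, ... so the fit covers
        # isotopologues with 2-7 substitutions while skipping the first two
        # (contaminated) mass channels.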
if not startmz:
startmz = basemz
elif startmz < basemz:
            err = 'Starting m/z value ({}) must be at least the base m/z ({})'
raise ValueError(err.format(startmz, basemz))
gcms = self.h5.extract_gcms(reffile)
# Get the MS and select only the region of interest
refms = self._ms_select(gcms)
mask = (gcms.masses >= refmin)
mask = np.logical_and(gcms.masses <= refmax, mask)
refvals = refms[mask]
# Modify the data mask. End m/z also needs also take into account the
# number of masses for the reference, i.e. len(ref['vals'])
endmz = startmz + numiso + (refmax - refmin)
refmask = (self.data.masses >= startmz)
refmask = np.logical_and(self.data.masses < endmz, refmask)
self._dmask = np.logical_or(self._dmask, refmask)
# Store all these values in the reference dictionary
self.refs[refname] = {}
tmpd = self.refs[refname]
tmpd['file'] = reffile
tmpd['refmin'] = refmin
tmpd['refmax'] = refmax
tmpd['basemz'] = basemz
tmpd['numiso'] = numiso
tmpd['startmz'] = startmz
tmpd['vals'] = refvals
# TODO: Do I need to store these values? I don't think so.
#tmpd['gcms'] = gcms ###
#tmpd['refms'] = refms ####
#tmpd['refvals'] = refvals ####
#tmpd['mask'] = mask ###
#tmpd['ms'] = refms ####
def fit(self, ):
'''Fit the data using the reference information provided.'''
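        # The model is linear: observed intensities ~= isomat.T @ coefs, where
        # each row of isomat holds one isotopologue's reference pattern shifted
        # to its m/z position; np.linalg.lstsq below recovers the relative
        # abundance coefficients.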
# Get the MS intensities from the data for the region covered by the
# references
# MS intensities
self.vals = self.datams[self._dmask]
# Masses for the intensities
self.masses = self.data.masses[self._dmask]
        # Create a DataFrame representation of the isotopologue matrix
self.isomat = self._iso_matrix_prep()
        # Perform least squares fit
        # TODO: I should probably check some of the other vals besides coefs
self._allfit = np.linalg.lstsq(self.isomat.T, self.vals)
# Make a results DataFrame w/ same index as iso matrix, Coef column
self.results = pd.DataFrame(self._allfit[0], index=self.isomat.index,
columns=['Coef',])
# As percents, needs to be done per compound (index level=0)
self.results['Per'] = 0.0
for cpd in self.results.index.levels[0]:
df = self.results.ix[cpd]
df['Per'] = df['Coef']*100./df['Coef'].sum()
# Create a DataFrame of simulated and real MS with residuals
siminten = self.isomat.mul(self.results['Coef'], axis=0)
simdf = pd.DataFrame({'Real': self.vals,
'Sim Total': siminten.sum(axis=0)})
simdf['Resid'] = simdf['Real'] - simdf['Sim Total']
# Add the simulation per compound as well
for cpd in siminten.index.levels[0]:
df = siminten.ix[cpd]
simdf[cpd + ' Sim'] = df.sum(axis=0)
simdf.index.set_names('Mass', inplace=True)
self.simdf = simdf
def save(self, ):
'''Save the fit data into the HDF file.'''
if not hasattr(self.h5.root, 'isotope'):
self.h5._handle.create_group('/', 'isotope',
filters=self.h5._filters)
isogrp = self.h5.root.isotope
if not hasattr(isogrp, self.data.shortname):
self.h5._handle.create_group(isogrp, self.data.shortname,
filters=self.h5._filters)
datagrp = getattr(isogrp, self.data.shortname)
if hasattr(datagrp, self.name):
self.h5._handle.remove_node(datagrp, self.name, recursive=True)
grp = self.h5._handle.create_group(datagrp, self.name,
filters=self.h5._filters)
for k, v in self.__dict__.items():
if isinstance(v, np.ndarray):
self.h5._handle.create_carray(grp, k, obj=v)
elif isinstance(v, pd.DataFrame):
loc = grp._v_pathname
self.h5.append(loc + '/' + k, v)
elif isinstance(v, (str, list, dict, tuple, int, float)):
self.h5._handle.set_node_attr(grp, k, v)
def _ms_select(self, gcms):
'''Select the MS from a data set.
Parameters
----------
gcms : GcmsObj instance
The GCMS data object to process.
Returns
-------
        ndarray
The normalized MS data over the given start/stop region.
'''
if not self.stop:
# Just grab a single slice
idx = gcms.index(gcms.times, self.start)
ms = gcms.intensity[idx]
return ms/ms.max()
else:
# Select a range of MS spectra
mask = (gcms.times > self.start) & (gcms.times < self.stop)
region = gcms.intensity[mask]
if self.rmbkg and self.stop:
# Subtract the first MS
region = region - region[0]
# Sum and normalize
ms = region.sum(axis=0)
return ms/ms.max()
def _iso_matrix_prep(self, ):
'''Create the isotopic matrix used for fitting.
Returns
-------
DataFrame
            The isotopic matrix with the m/z values as the columns and a
            MultiIndex on the rows giving the reference name and the number
            of isotopic substitutions.
'''
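        # Schematic of the resulting layout (values are illustrative only):
        #
        #                      m/z: 120   121   122  ...
        #     (cpd, 0)              1.00  0.30  0.05 ...
        #     (cpd, 1)              0.00  1.00  0.30 ...
        #
        # i.e. each row is the reference pattern shifted by Dnum mass units.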
dfs = {}
for refname in self.refs:
ref = self.refs[refname]
smz = ref['startmz']
num = ref['numiso']
base = ref['basemz']
diff = smz - base
rsz = ref['vals'].size
# This is going to be a problem
isos = np.arange(diff, num+diff)
zmat = np.zeros((num, self.vals.size))
# This is making our "diagonal" matrix of masses
for n in range(num):
firstmz = smz+n
lastmz = firstmz + rsz
rng = np.where((self.masses >= firstmz) & \
(self.masses < lastmz)
)[0]
rngsz = rng.size
if rngsz >= rsz:
zmat[n, rng] = ref['vals']
elif rngsz > 0:
zmat[n, rng] = ref['vals'][:rngsz]
# Create a dataframe with this matrix, which is easier to concat
tmpdf = pd.DataFrame(zmat, columns=self.masses, index=isos)
dfs[refname] = tmpdf
df = pd.concat(dfs)
df.index.set_names(['Ref', 'Dnum'], inplace=True)
return df
| bsd-3-clause |