repo_name | path | copies | size | content | license
---|---|---|---|---|---|
petewarden/tensorflow_makefile | tensorflow/examples/skflow/iris.py | 4 | 1322 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, cross_validation
from tensorflow.contrib import learn
# Load dataset.
iris = learn.datasets.load_dataset('iris')
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
tornadomeet/mxnet | example/svm_mnist/svm_mnist.py | 44 | 4094 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the final layer based on the L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch the MNIST dataset, add some noise as the article suggests,
# then permute and split the examples to be used by our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input and separate train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name='svm_label')
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name='svm_label')
# Here we instantiate and fit the model for our data
# The article actually suggests using 400 epochs,
# but I reduced it to 10 for convenience
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now print how well the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
f3r/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 45 | 3025 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = pl.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
pl.clim(0, 1)
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
danche354/Sequence-Labeling | ner_BIOES/senna-hash-2-pos-chunk-128-64-rmsprop5.py | 1 | 7395 | from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
from keras.optimizers import RMSprop
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
# gazetteer_length = conf.gazetteer_length
emb_vocab = conf.senna_vocab
emb_length = conf.senna_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length
batch_size = conf.batch_size
nb_epoch = 50 #conf.nb_epoch
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
# gazetteer_input = Input(shape=(step_length, gazetteer_length))
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, encoder_embedding, pos_input, chunk_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,hash_index_input,pos_input,chunk_input], output=output)
rmsprop = RMSprop(lr=0.0005)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
print(model.summary())
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
start = datetime.now()
print('-'*60)
print('epoch %d start at %s'%(epoch, str(start)))
log.write('-'*60+'\n')
log.write('epoch %d start at %s\n'%(epoch, str(start)))
train_loss = 0
dev_loss = 0
np.random.shuffle(train_data)
for i in range(number_of_train_batches):
train_batch = train_data[i*batch_size: (i+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
train_metrics = model.train_on_batch([embed_index, hash_index, pos, chunk], y)
train_loss += train_metrics[0]
all_train_loss.append(train_loss)
correct_predict = 0
all_predict = 0
for j in range(number_of_dev_batches):
dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, form='BIOES', gram='bi')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
# for loss
dev_metrics = model.test_on_batch([embed_index, hash_index, pos, chunk], y)
dev_loss += dev_metrics[0]
# for accuracy
prob = model.predict_on_batch([embed_index, hash_index, pos, chunk])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
correct_predict += np.sum(predict_label[:l]==label[i][:l])
all_predict += np.sum(length)
    epoch_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epoch_accuracy)
    all_dev_loss.append(dev_loss)
    if epoch_accuracy >= best_accuracy:
        best_accuracy = epoch_accuracy
best_epoch = epoch
end = datetime.now()
model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
print('epoch %d end at %s'%(epoch, str(end)))
print('epoch %d train loss: %f'%(epoch, train_loss))
print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epoch_accuracy))
print('best epoch now: %d\n'%best_epoch)
log.write('epoch %d end at %s\n'%(epoch, str(end)))
log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epoch_accuracy))
log.write('best epoch now: %d\n\n'%best_epoch)
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
| mit |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/backends/backend_qt5.py | 6 | 30437 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from .qt_compat import (QtCore, QtGui, QtWidgets, _getSaveFileName,
__version__, is_pyqt5)
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
def fn_name():
return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
if DEBUG:
print("Starting up QApplication")
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if hasattr(QtGui, "QX11Info"):
display = os.environ.get('DISPLAY')
if display is None or not re.search(':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([str(" ")])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
class Show(ShowBase):
def mainloop(self):
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
thisFig = Figure(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQT(figure)
manager = FigureManagerQT(canvas, num)
return manager
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt4 timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
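    A minimal usage sketch (hedged: it assumes an existing FigureCanvasQT
    instance named ``canvas``; the interval and the callback body are
    illustrative only):
        timer = canvas.new_timer(interval=1000)
        timer.add_callback(lambda: print('timer fired'))
        timer.start()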
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def __del__(self):
# Probably not necessary in practice, but is good behavior to
# disconnect
try:
TimerBase.__del__(self)
self._timer.timeout.disconnect(self._on_timer)
except RuntimeError:
# Timer C++ object already deleted
pass
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt5: ', figure)
_create_qApp()
# NB: Using super for this call to avoid a TypeError:
# __init__() takes exactly 2 arguments (1 given) on QWidget
# PyQt5
# The need for this change is documented here
# http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html#cooperative-multi-inheritance
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self.setMouseTracking(True)
w, h = self.get_width_height()
self.resize(w, h)
@property
def _dpi_ratio(self):
# Not available on Qt4 or some older Qt5.
try:
return self.devicePixelRatio()
except AttributeError:
return 1
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
x = pos.x() * self._dpi_ratio
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height - pos.y() * self._dpi_ratio
return x, y
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
if DEBUG:
print('button pressed:', event.button())
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
if DEBUG:
print('button doubleclicked:', event.button())
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
# if DEBUG: print('mouse move')
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
if DEBUG:
print('button released')
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps != 0:
FigureCanvasBase.scroll_event(self, x, y, steps, guiEvent=event)
if DEBUG:
print('scroll event: delta = %i, '
'steps = %i ' % (event.delta(), steps))
def keyPressEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
if DEBUG:
print('key press', key)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
if DEBUG:
print('key release', key)
def resizeEvent(self, event):
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
if DEBUG:
print('resize (%d x %d)' % (w, h))
print("FigureCanvasQt.resizeEvent(%d, %d)" % (w, h))
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
QtWidgets.QWidget.resizeEvent(self, event)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
# QT will use Key_Codes larger than that for keyboard keys that are
# are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs)
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
start_event_loop.__doc__ = \
FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
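    A hedged construction sketch, mirroring new_figure_manager_given_figure
    defined above (the figure size and number are illustrative only):
        canvas = FigureCanvasQT(Figure(figsize=(5, 4)))
        manager = FigureManagerQT(canvas, num=1)
        manager.show()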
"""
def __init__(self, canvas, num):
if DEBUG:
print('FigureManagerQT.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
# will enable the canvas to process event w/o clicking.
        # ClickFocus only takes the focus if the window has been clicked
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
# add text label to status bar
self.statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self.statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
if DEBUG:
print("destroy figure manager")
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
# Esthetic adjustments - we need to set these explicitly in PyQt5
# otherwise the layout looks different - but we don't want to set it if
# not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
# For some reason, self.setMinimumHeight doesn't seem to carry over to
# the actual sizeHint, so override it instead in order to make the
# aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
if len(allaxes) == 1:
axes = allaxes[0]
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def dynamic_update(self):
self.canvas.draw_idle()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
if DEBUG:
print('Set cursor', cursor)
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
startpath = matplotlib.rcParams.get('savefig.directory', '')
startpath = os.path.expanduser(startpath)
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath == '':
# explicitly missing key or empty str signals to use cwd
matplotlib.rcParams['savefig.directory'] = startpath
else:
# save dir for next time
savefig_dir = os.path.dirname(six.text_type(fname))
matplotlib.rcParams['savefig.directory'] = savefig_dir
try:
self.canvas.print_figure(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(SubplotTool, UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.donebutton.clicked.connect(self.close)
self.resetbutton.clicked.connect(self.reset)
self.tightlayout.clicked.connect(self.functight)
# constraints
self.sliderleft.valueChanged.connect(self.sliderright.setMinimum)
self.sliderright.valueChanged.connect(self.sliderleft.setMaximum)
self.sliderbottom.valueChanged.connect(self.slidertop.setMinimum)
self.slidertop.valueChanged.connect(self.sliderbottom.setMaximum)
self.defaults = {}
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
val = getattr(self.targetfig.subplotpars, attr)
self.defaults[attr] = val
slider = getattr(self, 'slider' + attr)
txt = getattr(self, attr + 'value')
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
# do this before hooking up the callbacks
slider.setSliderPosition(int(val * 1000))
txt.setText("%.2f" % val)
slider.valueChanged.connect(getattr(self, 'func' + attr))
self._setSliderPositions()
def _setSliderPositions(self):
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
slider = getattr(self, 'slider' + attr)
slider.setSliderPosition(int(self.defaults[attr] * 1000))
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(left=val)
self.leftvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(right=val)
self.rightvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(bottom=val)
self.bottomvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(top=val)
self.topvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def funcwspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(wspace=val)
self.wspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def funchspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(hspace=val)
self.hspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw_idle()
def functight(self):
self.targetfig.tight_layout()
self._setSliderPositions()
self.targetfig.canvas.draw_idle()
def reset(self):
self.targetfig.subplots_adjust(**self.defaults)
self._setSliderPositions()
self.targetfig.canvas.draw_idle()
def error_msg_qt(msg, parent=None):
if not is_string_like(msg):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
msg, QtGui.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
| gpl-3.0 |
vincentchevrier/dataquick | dataquick/structures/dataquickframe.py | 1 | 11098 | from collections import OrderedDict
import uuid
import typing
import json
import os
import pandas
from .. import util
class DataQuickFrame(pandas.DataFrame):
"""Sub-Class of pandas `DataFrame` that defines data-structures through
metadata and expected columns of data
The DataQuickFrame class is distinguished from a pandas DataFrame through
private class variables that provide a way to specify the structure of data,
which facilitates the easy processing and visualization of that data, e.g.
in the DataQuick QtViewer. When creating a subclass of a DataQuickFrame, the
following class variables should be defined in the subclass in order to
define the structure of the subclass.
_required_columns : OrderedDict()
a class variable defining the expected data series. The dict keys are
the DataFrame column labels and the values are the expected data types
(e.g. int, float, etc.)
    _required_metadata : OrderedDict()
        a class variable mapping the required metadata keys to their default values, used for validation
_column_properties : OrderedDict()
        a class variable defining the object properties that return column-like (Series) values derived from the actual columns
_x : typing.Hashable
the column label for 'x' values, if None defaults to the 1st column
_y : typing.Hashable
the column label for 'y' values, if None defaults to the 2nd column
_z : typing.Hashable
the column label for 'z' values, if None defaults to the 3rd column
the pandas `DataFrame` class has private variable `_metadata`_ which can be
assigned a list of strings that become instance attributes. For example, if
    a class defines :code:`_metadata = ["name", "date"]`, then an instance
can reference values as :code:`df.name`, and :code:`df.date`.
Because `_metadata` is more of an ad-hoc feature of `DataFrame`, and might
be deprecated in the future, this class uses `_metadata` to define an
attribute `metadata`, as an `OrderedDict` of metadata that can easily
interface with the json module for saving. As such, metadata would be
accessed by dictionary look-ups, e.g. :code:`df.metadata["name"]`
.. _`_metadata`: https://pandas.pydata.org/pandas-docs/stable/internals.html#define-original-properties
Attributes
----------
metadata : OrderedDict
an ordered key-value pair for storing all of the metadata, e.g. name,
date, mass, density, etc.
x : pandas.Series
The column of data representing `x` values, default is the first column
x_col : typing.Hashable
The column specifier for the `x` values, default is the specifier for
the first column
y : pandas.Series
The column of data representing `y` values, default is the second column
y_col : typing.Hashable
The column specifier for the `y` values, default is the specifier for
the second column
z : pandas.Series
The column of data representing `z` values, default is the third column
z_col : typing.Hashable
The column specifier for the `z` values, default is the specifier for
the third column
uuid : str or None
a python uuid string for when bookkeeping is necessary, default value is
None
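    Examples
    --------
    A minimal sketch of a hypothetical subclass (the ``Spectrum`` name and its
    columns are illustrative only and not part of the library):
    >>> class Spectrum(DataQuickFrame):
    ...     label = "Spectrum"
    ...     _required_columns = OrderedDict([("wavelength", float),
    ...                                      ("intensity", float)])
    ...     _x = "wavelength"
    ...     _y = "intensity"
    >>> s = Spectrum({"wavelength": [400.0, 500.0], "intensity": [1.0, 2.0]},
    ...              name="demo")
    >>> s.metadata["name"]
    'demo'
    >>> list(s.x)
    [400.0, 500.0]
    >>> s.is_valid()
    True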
"""
label = "DataFrame"
_metadata = "metadata"
_required_metadata = OrderedDict((
("name", ""),
("date", util.iso_date_string), # evaluate date-now if none is provided
))
_x = None # key for default x
_y = None # key for default y
_z = None # key for default z
_required_columns = OrderedDict() # minimum data Series that define the class
_column_properties = OrderedDict() # object properties that return a series based on the actual column values
_loaders = [] # functions that will read in a file and return this class
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False, **metadata):
"""The constructor for a DataQuickFrame, has the same signature as for a Pandas
Parameters
----------
data
index
columns
dtype
copy
metadata
keyword arguments specifically for the metadata. Must be JSON compatible
"""
super(DataQuickFrame, self).__init__(data, index, columns, dtype, copy)
self.metadata = OrderedDict()
self._uuid = None # type: str
for meta_key, meta_val in self._required_metadata.items():
val = metadata.pop(meta_key, meta_val)
if callable(val): # can only evaluate functions that take no arguments
val = val()
self.metadata[meta_key] = val
# TODO: implement json checker for metadata
self.metadata.update(metadata)
if not self.is_valid():
raise Exception
def is_valid(self):
"""determine if the data and metadata match the specifications of the
        DataQuickFrame class or subclass
DataQuickFrame is meant to be subclassed. The core specification of a
DataQuickFrame are expected columns of data in `._required_columns` and
also necessary meta-data specified in the `._required_metadata`
        dictionary. If, in the construction or manipulation of a DataQuickFrame,
the object no longer has the required columns or the required metadata,
it is no longer a valid datastructure. This function checks the state
of the instance to see if it matches the class. All base DataQuickFrame
instances should return `True` unless the `name` entry is removed from
the `.metadata` `dict`.
Returns
-------
bool
"""
# TODO: implement proper dtype checking
# dtypes in pandas are not so simple, to start, we're just going to use
# the consenting adults principle and check only for a column with the
# correct label, although checking for the correct dtype would be a nice
# thing to have
if not self.required_columns().issubset(self.columns):
return False
elif not self.required_metadata().issubset(self.metadata):
return False
else:
return True
@property
def x(self):
if not self._x:
return self.ix[:, 0]
else:
return self[self._x]
@property
def x_col(self):
if not self._x:
return 0
else:
return self.columns.get_loc(self._x)
@property
def y(self):
if not self._y:
return self.ix[:, 1]
else:
return self[self._y]
@property
def y_col(self):
if not self._y:
if len(self.columns) > 1:
return 1
else:
return None
else:
return self.columns.get_loc(self._y)
@property
def z(self):
if not self._z:
return self.ix[:, 2]
else:
return self[self._z]
@property
def z_col(self):
if not self._z:
if len(self.columns) > 2:
return 2
else:
return None
else:
return self.columns.get_loc(self._z)
@classmethod
def required_metadata(self):
return set(self._required_metadata)
@classmethod
def required_columns(self):
return set(self._required_columns)
@property
def uuid(self):
return self._uuid
def set_uuid(self):
"""
self.uuid is None by default and must be set. An error will be
raised if this function is called twice
"""
if self.uuid:
            raise AttributeError("self.uuid is already set and cannot be set again")
self._uuid = str(uuid.uuid1())
def get_uuid(self):
"""get uuid. If None, set one and then return it"""
if not self.uuid:
self.set_uuid()
return self.uuid
@classmethod
def loaders(cls):
"""get a list of all the Loader objects known to return this specific data structure
Returns
-------
list of Loaders
"""
return cls._loaders
@classmethod
def add_loader(cls, loader):
"""append a Loader object to this classes list of known loaders"""
cls._loaders.append(loader)
def column_accessors(self):
"""
return a list of 2-element tuples containing string labels and lambda
functions (i.e. the accessors) that return column-type values which
includes the dataframe columns and any properties that return
column-like values, i.e. the _column_properties.
Returns
-------
accessors : list of tuples
"""
accessors = list()
def series_accessor(df, key):
return lambda: df[key]
def property_accessor(df, prop):
return lambda: getattr(df, prop)
for key in list(self.keys()):
accessors.append((key, series_accessor(self, key)))
for prop in self._column_properties:
accessors.append((prop, property_accessor(self, prop)))
return accessors
def savetxt(self, filename, overwrite=False):
"""save an ascii version of the dataframe, with metadata included in
comment lines above the datablock
Parameters
----------
filename : str
overwrite : bool
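        Examples
        --------
        A hedged sketch (the frame and filename are illustrative; the ".dqf"
        suffix is appended automatically when missing):
        >>> dqf = DataQuickFrame({"a": [1, 2], "b": [3, 4]}, name="demo")
        >>> dqf.savetxt("demo", overwrite=True)  # doctest: +SKIP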
"""
if not filename.endswith(".dqf"):
filename += ".dqf"
meta = OrderedDict()
meta["file-type"] = "DataQuick txt version1"
meta["class"] = self.__class__.__name__
meta.update(self.metadata)
meta_block = "# " + json.dumps(meta, indent=2).replace("\n", "\n# ")
data_block = self.to_csv(index=None)
if os.path.isfile(filename) and not overwrite is True:
raise FileExistsError("You must specify overwrite to be True to overwrite the file")
with open(filename, "w") as fid:
fid.write(meta_block)
fid.write("\n")
fid.write(data_block)
def serialize(self):
"""return a string representation of the DataQuickFrame, with metadata
included
Returns
-------
str
"""
# TODO: define a standard text-format for a DataQuickFrame
        # TODO: after the text format is defined, update this method to produce it
data_block = self.to_csv(index=None)
return data_block
@classmethod
def from_clipboard(cls, *args, **kwargs):
df = pandas.read_clipboard()
# TODO: figure out a clever way to parse required columns, ignore for now for the sake of convenience
return cls(data=df, *args, **kwargs)
@property
def _constructor(self):
return self.__class__
def dqplot(self, *args, **kwargs):
"""Subclasses may implement custom plotting functionality"""
raise NotImplementedError("only available for specific subclasses of DataQuickFrame")
| mit |
open-risk/portfolio_analytics_library | setup.py | 1 | 2340 | # encoding: utf-8
# (c) 2014-2019 Open Risk (https://www.openriskmanagement.com)
#
# portfolioAnalytics is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of portfolioAnalytics. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from codecs import open
from setuptools import setup
__version__ = '0.2'
ver = __version__
long_descr = open('description.rst', 'r', encoding='utf8').read()
setup(name='portfolioAnalytics',
version=ver,
description='A Python powered library for calculating semi-analytic portfolio loss metrics',
long_description=long_descr,
author='Open Risk',
author_email='[email protected]',
packages=['portfolioAnalytics', 'portfolioAnalytics.estimators', 'portfolioAnalytics.utils', 'tests',
'portfolioAnalytics.thresholds', 'portfolioAnalytics.portfolio_models', 'datasets', 'examples.python'],
include_package_data=True,
url='https://github.com/open-risk/portfolioAnalytics',
install_requires=[
'pandas',
'numpy',
'scipy',
'statsmodels',
'sympy',
'matplotlib'
],
zip_safe=False,
provides=['portfolioAnalytics'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Financial and Insurance Industry',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis'
]
)
| gpl-2.0 |
anirudhjayaraman/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 35 | 11709 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
stefangri/s_s_productions | PHY641/RTM/Messdaten/latex.py | 7 | 4043 | from pandas import Series, DataFrame
import pandas as pd
import collections
import numpy
import uncertainties
import pint
from uncertainties import ufloat
from uncertainties import ufloat_fromstr
from pint import UnitRegistry
import string
ureg = UnitRegistry()
Q_ = ureg.Quantity
def return_int(num):
num_str = str(num)
num_str = num_str.split('.')[1]
num_str = num_str[0:1]
return int(num_str)
class Latexdocument(object):
def __init__(self, filename):
self.name = filename
self.data = DataFrame(columns=(['tex', 'var']))
def tabular(self, data, header, places, caption, label):
with open(self.name, 'w') as f:
f.write('\\begin{table} \n\\centering \n\\caption{' + caption + '} \n\\label{tab: ' + label + '} \n\\begin{tabular}{')
for i in range(0, len(data)):
if type(data[i][0]) == uncertainties.core.Variable:
f.write('S[table-format=' + str(places[i][0]) + ']@{${}\pm{}$} S[table-format=' + str(places[i][1]) + '] ')
else:
f.write('S ')
f.write('} \n\\toprule \n')
for i in range(0, len(data)):
if i == len(data) - 1:
if type(data[i][0]) == uncertainties.core.Variable:
f.write('\multicolumn{2}{c}{$' + header[i][0:header[i].find('/')] + '\:/\: \si{' + header[i][header[i].find('/')+1:] + '}$} \\\ \n')
else:
f.write('{$' + header[i][0:header[i].find('/')] + '/ \si{' + header[i][header[i].find('/')+1:] + '}$} \\\ \n')
else:
if type(data[i][0]) == uncertainties.core.Variable:
f.write('\multicolumn{2}{c}{$' + header[i][0:header[i].find('/')] + '\:/\: \si{' + header[i][header[i].find('/')+1:] + '}$} & ')
else:
f.write('{$' + header[i][0:header[i].find('/')] + '/ \si{' + header[i][header[i].find('/')+1:] + '}$} & ')
f.write('\\midrule \n')
for i in range(0, len(data[0])):
for j in range(0, len(data)):
if type(data[j][0]) == uncertainties.core.Variable:
if j == len(data) - 1:
f.write(('{:.' + str(return_int(places[j][0])) + 'f} ' + '& {:.' + str(return_int(places[j][1])) + 'f}' + '\\\ \n').format(data[j][i].n, data[j][i].s))
else:
                            f.write(('{:.' + str(return_int(places[j][0])) + 'f} ' + '& {:.' + str(return_int(places[j][1])) + 'f}'+ ' & ').format(data[j][i].n, data[j][i].s))
else:
if j == len(data) - 1:
f.write(('{:.' + str(places[j]) + 'f}' + '\\\ \n').format(data[j][i]))
else:
f.write(('{:.' + str(places[j]) + 'f}' + ' & ').format(data[j][i]))
f.write('\\bottomrule \n\\end{tabular} \n\\end{table}')
def app(self, name, value):
if (type(value.magnitude) == uncertainties.core.Variable or type(value.magnitude) == uncertainties.core.AffineScalarFunc):
val = '{:+.1uS}'.format(value.magnitude)
s = '{:Lx}'.format(Q_(2, value.units)) + '~'
df = DataFrame(collections.OrderedDict({'var': pd.Series(value, index = [name] ),
#'tex': name + ' = \SI{' + val[:val.index('+')]+ ' \pm ' + val[val.index('-')+1:] + s[s.index('}{'):s.index('~')]}))
'tex': name + ' = \SI{' + val + '}{' + s[s.index('}{') + 2:s.index('~')]}))
self.data = self.data.append(df)
else:
df = DataFrame({'var': pd.Series(value, index = [name] ),
'tex': name + ' = ' + '{:Lx}'.format(value)})
self.data = self.data.append(df)
def makeresults(self):
print(self.data['var'])
with open(self.name, 'w') as f:
for i in self.data['tex']:
f.write(i + '\n')
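# Minimal usage sketch (the output file name and the quantity below are made-up
# examples, not part of this module; relies on the Q_ and ufloat objects
# imported above):
#
#   doc = Latexdocument('results.tex')
#   doc.app('v', Q_(ufloat(3.21, 0.05), 'meter / second'))
#   doc.makeresults()   # writes one siunitx line per stored quantity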
| mit |
ahoyosid/scikit-learn | sklearn/decomposition/nmf.py | 24 | 19057 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
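# Worked example of the measure (exact for these inputs): a constant vector has
# sparseness 0, a one-hot vector has sparseness 1.
#
#   >>> _sparseness(np.ones(4))
#   0.0
#   >>> _sparseness(np.array([1., 0., 0., 0.]))
#   1.0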
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
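# Usage sketch (shapes and data are arbitrary; only illustrates the expected
# output shapes of the NNDSVD initialization above):
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = np.abs(rng.randn(6, 4))
#   >>> W, H = _initialize_nmf(X, n_components=2, variant='a')
#   >>> W.shape, H.shape
#   ((6, 2), (2, 4))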
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
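# Usage sketch (toy shapes, arbitrary data): solves min ||WH - V||_2 for a
# non-negative H with V and W held fixed.
#
#   >>> rng = np.random.RandomState(0)
#   >>> V, W = np.abs(rng.randn(5, 3)), np.abs(rng.randn(5, 2))
#   >>> H0 = np.abs(rng.randn(2, 3))
#   >>> H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
#   >>> bool((H >= 0).all())
#   True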
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | 23 | 18346 | """
Test VAR Model
"""
from __future__ import print_function
# pylint: disable=W0612,W0231
from statsmodels.compat.python import (iteritems, StringIO, lrange, BytesIO,
range)
from nose.tools import assert_raises
import nose
import os
import sys
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tools.data as data_util
from statsmodels.tsa.vector_ar.var_model import VAR
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
# just so pylint won't complain
res1 = None
res2 = None
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
assert_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
assert_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
assert_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].mse_resid**.5,
eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)
def test_llf(self):
results = self.res1.results
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
assert_almost_equal(results[i].llf,
eval('self.res2.llf_'+str(i+1)), DECIMAL_2)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]
names = data.dtype.names
nd = data.view((float,3))
nd = np.diff(np.log(nd), axis=0)
return nd.ravel().view(data.dtype)
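# Illustrative round-trip with the data prepared above (2 lags chosen only for
# the sketch; mirrors what the test classes below do):
#
#   >>> data = get_macrodata().view((float, 3))
#   >>> res = VAR(data).fit(2)
#   >>> irf = res.irf(10)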
def generate_var():
from rpy2.robjects import r
import pandas.rpy.common as prp
r.source('tests/var.R')
return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var():
result = generate_var()
np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
"""
Simple interface with results generated by "vars" package in R.
"""
def __init__(self):
#data = np.load(resultspath + 'vars_results.npz')
from .results.results_var_data import var_results
data = var_results.__dict__
self.names = data['coefs'].dtype.names
self.params = data['coefs'].view((float, len(self.names)))
self.stderr = data['stderr'].view((float, len(self.names)))
self.irf = data['irf'].item()
self.orth_irf = data['orthirf'].item()
self.nirfs = int(data['nirfs'][0])
self.nobs = int(data['obs'][0])
self.totobs = int(data['totobs'][0])
crit = data['crit'].item()
self.aic = crit['aic'][0]
self.sic = self.bic = crit['sic'][0]
self.hqic = crit['hqic'][0]
self.fpe = crit['fpe'][0]
self.detomega = data['detomega'][0]
self.loglike = data['loglike'][0]
self.nahead = int(data['nahead'][0])
self.ma_rep = data['phis']
self.causality = data['causality']
def close_plots():
try:
import matplotlib.pyplot as plt
plt.close('all')
except ImportError:
pass
_orig_stdout = None
def setup_module():
global _orig_stdout
_orig_stdout = sys.stdout
sys.stdout = StringIO()
def teardown_module():
sys.stdout = _orig_stdout
close_plots()
def have_matplotlib():
try:
import matplotlib
return True
except ImportError:
return False
class CheckIRF(object):
ref = None; res = None; irf = None
k = None
#---------------------------------------------------------------------------
# IRF tests
def test_irf_coefs(self):
self._check_irfs(self.irf.irfs, self.ref.irf)
self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
def _check_irfs(self, py_irfs, r_irfs):
for i, name in enumerate(self.res.names):
ref_irfs = r_irfs[name].view((float, self.k))
res_irfs = py_irfs[:, :, i]
assert_almost_equal(ref_irfs, res_irfs)
def test_plot_irf(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot()
self.irf.plot(plot_stderr=False)
self.irf.plot(impulse=0, response=1)
self.irf.plot(impulse=0)
self.irf.plot(response=0)
self.irf.plot(orth=True)
self.irf.plot(impulse=0, response=1, orth=True)
close_plots()
def test_plot_cum_effects(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot_cum_effects()
self.irf.plot_cum_effects(plot_stderr=False)
self.irf.plot_cum_effects(impulse=0, response=1)
self.irf.plot_cum_effects(orth=True)
self.irf.plot_cum_effects(impulse=0, response=1, orth=True)
close_plots()
class CheckFEVD(object):
fevd = None
#---------------------------------------------------------------------------
# FEVD tests
def test_fevd_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.fevd.plot()
close_plots()
def test_fevd_repr(self):
self.fevd
def test_fevd_summary(self):
self.fevd.summary()
def test_fevd_cov(self):
# test does not crash
# not implemented
# covs = self.fevd.cov()
pass
class TestVARResults(CheckIRF, CheckFEVD):
@classmethod
def setupClass(cls):
cls.p = 2
cls.data = get_macrodata()
cls.model = VAR(cls.data)
cls.names = cls.model.endog_names
cls.ref = RResults()
cls.k = len(cls.ref.names)
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(cls.ref.nirfs)
cls.nahead = cls.ref.nahead
cls.fevd = cls.res.fevd()
def test_constructor(self):
# make sure this works with no names
ndarr = self.data.view((float, 3))
model = VAR(ndarr)
res = model.fit(self.p)
def test_names(self):
assert_equal(self.model.endog_names, self.ref.names)
model2 = VAR(self.data)
assert_equal(model2.endog_names, self.ref.names)
def test_get_eq_index(self):
assert(type(self.res.names) is list)
for i, name in enumerate(self.names):
idx = self.res.get_eq_index(i)
idx2 = self.res.get_eq_index(name)
assert_equal(idx, i)
assert_equal(idx, idx2)
assert_raises(Exception, self.res.get_eq_index, 'foo')
def test_repr(self):
# just want this to work
foo = str(self.res)
bar = repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
def test_cov_params(self):
# do nothing for now
self.res.cov_params
def test_cov_ybar(self):
self.res.cov_ybar()
def test_tstat(self):
self.res.tvalues
def test_pvalues(self):
self.res.pvalues
def test_summary(self):
summ = self.res.summary()
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
def test_aic(self):
assert_almost_equal(self.res.aic, self.ref.aic)
def test_bic(self):
assert_almost_equal(self.res.bic, self.ref.bic)
def test_hqic(self):
assert_almost_equal(self.res.hqic, self.ref.hqic)
def test_fpe(self):
assert_almost_equal(self.res.fpe, self.ref.fpe)
def test_lagorder_select(self):
ics = ['aic', 'fpe', 'hqic', 'bic']
for ic in ics:
res = self.model.fit(maxlags=10, ic=ic, verbose=True)
assert_raises(Exception, self.model.fit, ic='foo')
def test_nobs(self):
assert_equal(self.res.nobs, self.ref.nobs)
def test_stderr(self):
assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
def test_loglike(self):
assert_almost_equal(self.res.llf, self.ref.loglike)
def test_ma_rep(self):
ma_rep = self.res.ma_rep(self.nahead)
assert_almost_equal(ma_rep, self.ref.ma_rep)
#--------------------------------------------------
# Lots of tests to make sure stuff works...need to check correctness
def test_causality(self):
causedby = self.ref.causality['causedby']
for i, name in enumerate(self.names):
variables = self.names[:i] + self.names[i + 1:]
result = self.res.test_causality(name, variables, kind='f')
assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)
rng = lrange(self.k)
rng.remove(i)
result2 = self.res.test_causality(i, rng, kind='f')
assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)
# make sure works
result = self.res.test_causality(name, variables, kind='wald')
# corner cases
_ = self.res.test_causality(self.names[0], self.names[1])
_ = self.res.test_causality(0, 1)
assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')
def test_select_order(self):
result = self.model.fit(10, ic='aic', verbose=True)
result = self.model.fit(10, ic='fpe', verbose=True)
# bug
model = VAR(self.model.endog)
model.select_order()
def test_is_stable(self):
# may not necessarily be true for other datasets
assert(self.res.is_stable(verbose=True))
def test_acf(self):
# test that it works...for now
acfs = self.res.acf(10)
# defaults to nlags=lag_order
acfs = self.res.acf()
assert(len(acfs) == self.p + 1)
def test_acorr(self):
acorrs = self.res.acorr(10)
def test_forecast(self):
point = self.res.forecast(self.res.y[-5:], 5)
def test_forecast_interval(self):
y = self.res.y[:-self.p:]
point, lower, upper = self.res.forecast_interval(y, 5)
def test_plot_sim(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plotsim(steps=100)
close_plots()
def test_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot()
close_plots()
def test_plot_acorr(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_acorr()
close_plots()
def test_plot_forecast(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_forecast(5)
close_plots()
def test_reorder(self):
#manually reorder
data = self.data.view((float,3))
names = self.names
data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)
names2 = []
names2.append(names[2])
names2.append(names[0])
names2.append(names[1])
res2 = VAR(data2).fit(maxlags=self.p)
#use reorder function
res3 = self.res.reorder(['realinv','realgdp', 'realcons'])
#check if the main results match
assert_almost_equal(res2.params, res3.params)
assert_almost_equal(res2.sigma_u, res3.sigma_u)
assert_almost_equal(res2.bic, res3.bic)
assert_almost_equal(res2.stderr, res3.stderr)
def test_pickle(self):
fh = BytesIO()
#test wrapped results load save pickle
self.res.save(fh)
fh.seek(0,0)
res_unpickled = self.res.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res))
class E1_Results(object):
"""
Results from Lutkepohl (2005) using E2 dataset
"""
def __init__(self):
# Lutkepohl p. 120 results
# I asked the author about these results and there is probably rounding
# error in the book, so I adjusted these test results to match what is
# coming out of the Python (double-checked) calculations
self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.129, 0.547, 0.663],
[0.032, 0.134, 0.163],
[0.026, 0.108, 0.131]],
[[0.084, .385, .479],
[.016, .079, .095],
[.016, .078, .103]]])
self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.149, 0.631, 0.764],
[0.044, 0.185, 0.224],
[0.033, 0.140, 0.169]],
[[0.099, .468, .555],
[.038, .170, .205],
[.033, .150, .185]]])
self.lr_stderr = np.array([[.134, .645, .808],
[.048, .230, .288],
[.043, .208, .260]])
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
lut_data = basepath + '/tsa/vector_ar/data/'
path = lut_data + '%s.dat' % name
return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
files = ['e%d' % i for i in range(1, 7)]
for f in files:
get_lutkepohl_data(f)
class TestVARResultsLutkepohl(object):
"""
Verify calculations using results from Lutkepohl's book
"""
def __init__(self):
self.p = 2
sdata, dates = get_lutkepohl_data('e1')
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='Q')
self.res = self.model.fit(maxlags=self.p)
self.irf = self.res.irf(10)
self.lut = E1_Results()
def test_approx_mse(self):
# 3.5.18, p. 99
mse2 = np.array([[25.12, .580, 1.300],
[.580, 1.581, .586],
[1.300, .586, 1.009]]) * 1e-4
assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
DECIMAL_3)
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
assert_almost_equal(np.round(irf_stderr[i], 3),
self.lut.irf_stderr[i-1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
assert_almost_equal(np.round(stderr[i], 3),
self.lut.cum_irf_stderr[i-1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
results = {
'c' : 1,
'nc' : 0,
'ct' : 2,
'ctt' : 3
}
for t, trendorder in iteritems(results):
assert(util.get_trendorder(t) == trendorder)
def test_var_constant():
# see 2043
import datetime
from pandas import DataFrame, DatetimeIndex
series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])
data = DataFrame(series)
d = datetime.datetime.now()
delta = datetime.timedelta(days=1)
index = []
for i in range(data.shape[0]):
index.append(d)
d += delta
data.index = DatetimeIndex(index)
model = VAR(data)
assert_raises(ValueError, model.fit, 1)
def test_var_trend():
# see 2271
data = get_macrodata().view((float,3))
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
assert_raises(ValueError, model.fit, 4, trend='t')
def test_irf_trend():
# test for irf with different trend see #1636
# this is a rough comparison by adding trend or subtracting mean to data
# to get similar AR coefficients and IRF
data = get_macrodata().view((float,3))
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
irf_nc = results_nc.irf(10)
assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)
trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)
# for pandas version, currently not used, if data is a pd.DataFrame
#data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)
data_t = data + trend[:,None]
model_t = sm.tsa.VAR(data_t)
results_t = model_t.fit(4, trend = 'ct')
irf_t = results_t.irf(10)
assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/semi_supervised/tests/test_self_training.py | 6 | 12895 | from math import ceil
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from sklearn.ensemble import StackingClassifier
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris, make_blobs
from sklearn.metrics import accuracy_score
from sklearn.semi_supervised import SelfTrainingClassifier
# Author: Oliver Rausch <[email protected]>
# License: BSD 3 clause
# load the iris dataset and randomly permute it
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=0)
n_labeled_samples = 50
y_train_missing_labels = y_train.copy()
y_train_missing_labels[n_labeled_samples:] = -1
mapping = {0: 'A', 1: 'B', 2: 'C', -1: '-1'}
y_train_missing_strings = np.vectorize(mapping.get)(
y_train_missing_labels).astype(object)
y_train_missing_strings[y_train_missing_labels == -1] = -1
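# Illustrative sketch of the setup above: -1 marks unlabeled rows, and a
# self-training fit consumes the partially labeled target directly (the
# KNeighborsClassifier base estimator is an arbitrary choice for the sketch).
#
#   >>> clf = SelfTrainingClassifier(KNeighborsClassifier())
#   >>> clf.fit(X_train, y_train_missing_labels)
#   >>> clf.predict(X_test).shape == y_test.shape
#   True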
def test_missing_predict_proba():
# Check that an error is thrown if predict_proba is not implemented
base_estimator = SVC(probability=False, gamma='scale')
self_training = SelfTrainingClassifier(base_estimator)
with pytest.raises(ValueError, match=r"base_estimator \(SVC\) should"):
self_training.fit(X_train, y_train_missing_labels)
def test_none_classifier():
st = SelfTrainingClassifier(None)
with pytest.raises(ValueError, match="base_estimator cannot be None"):
st.fit(X_train, y_train_missing_labels)
@pytest.mark.parametrize("max_iter, threshold",
[(-1, 1.0), (-100, -2), (-10, 10)])
def test_invalid_params(max_iter, threshold):
# Test negative iterations
base_estimator = SVC(gamma="scale", probability=True)
st = SelfTrainingClassifier(base_estimator, max_iter=max_iter)
with pytest.raises(ValueError, match="max_iter must be >= 0 or None"):
st.fit(X_train, y_train)
base_estimator = SVC(gamma="scale", probability=True)
st = SelfTrainingClassifier(base_estimator, threshold=threshold)
with pytest.raises(ValueError, match="threshold must be in"):
st.fit(X_train, y_train)
def test_invalid_params_selection_crit():
st = SelfTrainingClassifier(KNeighborsClassifier(),
criterion='foo')
with pytest.raises(ValueError, match="criterion must be either"):
st.fit(X_train, y_train)
def test_warns_k_best():
st = SelfTrainingClassifier(KNeighborsClassifier(),
criterion='k_best',
k_best=1000)
with pytest.warns(UserWarning, match="k_best is larger than"):
st.fit(X_train, y_train_missing_labels)
assert st.termination_condition_ == 'all_labeled'
@pytest.mark.parametrize("base_estimator",
[KNeighborsClassifier(),
SVC(gamma="scale", probability=True,
random_state=0)])
@pytest.mark.parametrize("selection_crit",
['threshold', 'k_best'])
def test_classification(base_estimator, selection_crit):
# Check classification for various parameter settings.
# Also assert that predictions for strings and numerical labels are equal.
# Also test for multioutput classification
threshold = 0.75
max_iter = 10
st = SelfTrainingClassifier(base_estimator, max_iter=max_iter,
threshold=threshold,
criterion=selection_crit)
st.fit(X_train, y_train_missing_labels)
pred = st.predict(X_test)
proba = st.predict_proba(X_test)
st_string = SelfTrainingClassifier(base_estimator, max_iter=max_iter,
criterion=selection_crit,
threshold=threshold)
st_string.fit(X_train, y_train_missing_strings)
pred_string = st_string.predict(X_test)
proba_string = st_string.predict_proba(X_test)
assert_array_equal(np.vectorize(mapping.get)(pred), pred_string)
assert_array_equal(proba, proba_string)
assert st.termination_condition_ == st_string.termination_condition_
# Check consistency between labeled_iter, n_iter and max_iter
labeled = y_train_missing_labels != -1
# assert that labeled samples have labeled_iter = 0
assert_array_equal(st.labeled_iter_ == 0, labeled)
# assert that labeled samples do not change label during training
assert_array_equal(y_train_missing_labels[labeled],
st.transduction_[labeled])
# assert that the max of the iterations is less than the total amount of
# iterations
assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter
assert np.max(st_string.labeled_iter_) <= st_string.n_iter_ <= max_iter
# check shapes
assert st.labeled_iter_.shape == st.transduction_.shape
assert st_string.labeled_iter_.shape == st_string.transduction_.shape
def test_k_best():
st = SelfTrainingClassifier(KNeighborsClassifier(n_neighbors=1),
criterion='k_best',
k_best=10,
max_iter=None)
y_train_only_one_label = np.copy(y_train)
y_train_only_one_label[1:] = -1
n_samples = y_train.shape[0]
n_expected_iter = ceil((n_samples - 1) / 10)
st.fit(X_train, y_train_only_one_label)
assert st.n_iter_ == n_expected_iter
# Check labeled_iter_
assert np.sum(st.labeled_iter_ == 0) == 1
for i in range(1, n_expected_iter):
assert np.sum(st.labeled_iter_ == i) == 10
assert np.sum(st.labeled_iter_ == n_expected_iter) == (n_samples - 1) % 10
assert st.termination_condition_ == 'all_labeled'
def test_sanity_classification():
base_estimator = SVC(gamma="scale", probability=True)
base_estimator.fit(X_train[n_labeled_samples:],
y_train[n_labeled_samples:])
st = SelfTrainingClassifier(base_estimator)
st.fit(X_train, y_train_missing_labels)
pred1, pred2 = base_estimator.predict(X_test), st.predict(X_test)
assert not np.array_equal(pred1, pred2)
score_supervised = accuracy_score(base_estimator.predict(X_test), y_test)
score_self_training = accuracy_score(st.predict(X_test), y_test)
assert score_self_training > score_supervised
def test_none_iter():
# Check that the all samples were labeled after a 'reasonable' number of
# iterations.
st = SelfTrainingClassifier(KNeighborsClassifier(), threshold=.55,
max_iter=None)
st.fit(X_train, y_train_missing_labels)
assert st.n_iter_ < 10
assert st.termination_condition_ == "all_labeled"
@pytest.mark.parametrize("base_estimator",
[KNeighborsClassifier(),
SVC(gamma="scale", probability=True,
random_state=0)])
@pytest.mark.parametrize("y", [y_train_missing_labels,
y_train_missing_strings])
def test_zero_iterations(base_estimator, y):
# Check classification for zero iterations.
# Fitting a SelfTrainingClassifier with zero iterations should give the
# same results as fitting a supervised classifier.
# This also asserts that string arrays work as expected.
clf1 = SelfTrainingClassifier(base_estimator, max_iter=0)
clf1.fit(X_train, y)
clf2 = base_estimator.fit(X_train[:n_labeled_samples],
y[:n_labeled_samples])
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
assert clf1.termination_condition_ == "max_iter"
def test_prefitted_throws_error():
# Test that passing a pre-fitted classifier and calling predict throws an
# error
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
st = SelfTrainingClassifier(knn)
with pytest.raises(NotFittedError, match="This SelfTrainingClassifier"
" instance is not fitted yet"):
st.predict(X_train)
@pytest.mark.parametrize("max_iter", range(1, 5))
def test_labeled_iter(max_iter):
# Check that the amount of datapoints labeled in iteration 0 is equal to
# the amount of labeled datapoints we passed.
st = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=max_iter)
st.fit(X_train, y_train_missing_labels)
amount_iter_0 = len(st.labeled_iter_[st.labeled_iter_ == 0])
assert amount_iter_0 == n_labeled_samples
# Check that the max of the iterations is less than the total amount of
# iterations
assert np.max(st.labeled_iter_) <= st.n_iter_ <= max_iter
def test_no_unlabeled():
# Test that training on a fully labeled dataset produces the same results
# as training the classifier by itself.
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
st = SelfTrainingClassifier(knn)
with pytest.warns(UserWarning, match="y contains no unlabeled samples"):
st.fit(X_train, y_train)
assert_array_equal(knn.predict(X_test), st.predict(X_test))
# Assert that all samples were labeled in iteration 0 (since there were no
# unlabeled samples).
assert np.all(st.labeled_iter_ == 0)
assert st.termination_condition_ == "all_labeled"
def test_early_stopping():
svc = SVC(gamma='scale', probability=True)
st = SelfTrainingClassifier(svc)
X_train_easy = [[1], [0], [1], [0.5]]
y_train_easy = [1, 0, -1, -1]
# X = [[0.5]] cannot be predicted on with a high confidence, so training
# stops early
st.fit(X_train_easy, y_train_easy)
assert st.n_iter_ == 1
assert st.termination_condition_ == 'no_change'
def test_strings_dtype():
clf = SelfTrainingClassifier(KNeighborsClassifier())
X, y = make_blobs(n_samples=30, random_state=0,
cluster_std=0.1)
labels_multiclass = ["one", "two", "three"]
y_strings = np.take(labels_multiclass, y)
with pytest.raises(ValueError, match="dtype"):
clf.fit(X, y_strings)
@pytest.mark.parametrize("verbose", [True, False])
def test_verbose(capsys, verbose):
clf = SelfTrainingClassifier(KNeighborsClassifier(), verbose=verbose)
clf.fit(X_train, y_train_missing_labels)
captured = capsys.readouterr()
if verbose:
assert 'iteration' in captured.out
else:
assert 'iteration' not in captured.out
def test_verbose_k_best(capsys):
st = SelfTrainingClassifier(KNeighborsClassifier(n_neighbors=1),
criterion='k_best',
k_best=10, verbose=True,
max_iter=None)
y_train_only_one_label = np.copy(y_train)
y_train_only_one_label[1:] = -1
n_samples = y_train.shape[0]
n_expected_iter = ceil((n_samples - 1) / 10)
st.fit(X_train, y_train_only_one_label)
captured = capsys.readouterr()
msg = 'End of iteration {}, added {} new labels.'
for i in range(1, n_expected_iter):
assert msg.format(i, 10) in captured.out
assert msg.format(n_expected_iter,
(n_samples - 1) % 10) in captured.out
def test_k_best_selects_best():
# Tests that the labels added by st really are the 10 best labels.
svc = SVC(gamma='scale', probability=True, random_state=0)
st = SelfTrainingClassifier(svc,
criterion='k_best',
max_iter=1, k_best=10)
has_label = y_train_missing_labels != -1
st.fit(X_train, y_train_missing_labels)
got_label = ~has_label & (st.transduction_ != -1)
svc.fit(X_train[has_label], y_train_missing_labels[has_label])
pred = svc.predict_proba(X_train[~has_label])
max_proba = np.max(pred, axis=1)
most_confident_svc = X_train[~has_label][np.argsort(max_proba)[-10:]]
added_by_st = X_train[np.where(got_label)].tolist()
for row in most_confident_svc.tolist():
assert row in added_by_st
def test_base_estimator_meta_estimator():
# Check that a meta-estimator relying on an estimator implementing
    # `predict_proba` will work even if it does not expose this method before
    # being fitted.
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/19119
base_estimator = StackingClassifier(
estimators=[
("svc_1", SVC(probability=True)), ("svc_2", SVC(probability=True)),
],
final_estimator=SVC(probability=True), cv=2
)
# make sure that the `base_estimator` does not expose `predict_proba`
# without being fitted
assert not hasattr(base_estimator, "predict_proba")
clf = SelfTrainingClassifier(base_estimator=base_estimator)
clf.fit(X_train, y_train_missing_labels)
clf.predict_proba(X_test)
| bsd-3-clause |
branden/dcos | pkgpanda/__init__.py | 4 | 35789 | """
See `docs/package_concepts.md` for the package layout.
Packages have ids. Ids are composed of a name + blob. The blob is never
introspected by the packaging stuff.
Each package contains a pkginfo.json. That contains a list of requires as well as
environment variables from the package.
"""
import grp
import json
import os
import os.path
import pwd
import re
import shutil
import tempfile
from collections import Iterable
from itertools import chain
from subprocess import CalledProcessError, check_call, check_output
from typing import Union
from pkgpanda.constants import (DCOS_SERVICE_CONFIGURATION_FILE,
RESERVED_UNIT_NAMES,
STATE_DIR_ROOT)
from pkgpanda.exceptions import (InstallError, PackageError, PackageNotFound,
ValidationError)
from pkgpanda.util import (download, extract_tarball, if_exists, load_json, write_json, write_string)
# TODO(cmaloney): Can we switch to something like a PKGBUILD from ArchLinux and
# then just do the multi-version stuff ourselves and save a lot of re-implementation?
reserved_env_vars = ["LD_LIBRARY_PATH", "PATH"]
env_header = """# Pkgpanda provided environment variables
LD_LIBRARY_PATH={0}/lib
PATH={0}/bin:/usr/bin:/bin:/sbin\n\n"""
env_export_header = """# Pkgpanda provided environment variables
export LD_LIBRARY_PATH={0}/lib
export PATH="{0}/bin:$PATH"\n\n"""
name_regex = "^[a-zA-Z0-9@_+][a-zA-Z0-9@._+\-]*$"
version_regex = "^[a-zA-Z0-9@_+:.]+$"
username_regex = "^dcos_[a-z0-9_]+$"
linux_group_regex = "^[a-z_][a-z0-9_-]*$" # https://github.com/shadow-maint/shadow/blob/master/libmisc/chkname.c#L52
# Manage starting/stopping all systemd services inside a folder.
class Systemd:
def __init__(self, unit_directory, active, block):
self.__unit_directory = unit_directory
self.__active = active
self.__block = block
def stop_all(self):
if not self.__active:
return
if not os.path.exists(self.__unit_directory):
return
for name in os.listdir(self.__unit_directory):
# Skip directories
if os.path.isdir(os.path.join(self.__unit_directory, name)):
continue
try:
cmd = ["systemctl", "stop", name]
if not self.__block:
cmd.append("--no-block")
check_call(cmd)
except CalledProcessError as ex:
# If the service doesn't exist, don't error. This happens when a
# bootstrap tarball has just been extracted but nothing started
# yet during first activation.
if ex.returncode != 5:
raise
@property
def unit_directory(self):
return self.__unit_directory
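# Usage sketch (the unit directory path is a placeholder for the example, not a
# guaranteed layout): stop every unit file found in the managed directory.
#
#   systemd = Systemd('/etc/systemd/system/dcos.target.wants', active=True, block=True)
#   systemd.stop_all()   # runs `systemctl stop <unit>` for each unit file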
class PackageId:
@staticmethod
def parse(id: str):
parts = id.split('--')
if len(parts) != 2:
raise ValidationError(
"Invalid package id {0}. Package ids may only ".format(id) +
"contain one '--' which seperates the name and version")
PackageId.validate_name(parts[0])
PackageId.validate_version(parts[1])
return parts[0], parts[1]
@staticmethod
def from_parts(name, version):
# TODO(cmaloney): This format, then parse is less than ideal.
return PackageId("{0}--{1}".format(name, version))
@staticmethod
def validate_name(name):
# [a-zA-Z0-9@._+-]
# May not start with '.' or '-'.
if not re.match(name_regex, name):
raise ValidationError("Invalid package name {0}. Must match the regex {1}".format(name, name_regex))
@staticmethod
def is_id(package_str):
return package_str.count('--') == 1
@staticmethod
def validate_version(version):
# [a-zA-Z0-9@._+:]
# May not contain a '-'.
if not re.match(version_regex, version):
raise ValidationError(
"Invalid package version {0}. Must match the regex {1}".format(version, version_regex))
def __init__(self, id):
self.name, self.version = PackageId.parse(id)
def __repr__(self):
return '{0}--{1}'.format(self.name, self.version)
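# Illustrative parses (the id string is an arbitrary example):
#
#   >>> PackageId('mesos--0.22.0').name
#   'mesos'
#   >>> PackageId('mesos--0.22.0').version
#   '0.22.0'
#   >>> str(PackageId.from_parts('mesos', '0.22.0'))
#   'mesos--0.22.0'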
class Package:
def __init__(self, path, id: Union[PackageId, str], pkginfo):
if isinstance(id, str):
id = PackageId(id)
self.__id = id
self.__path = path
self.__pkginfo = pkginfo
@property
def environment(self):
return self.__pkginfo.get('environment', dict())
@property
def sysctl(self):
return self.__pkginfo.get('sysctl', dict())
@property
def check_dir(self):
return self.__path + '/check'
@property
def id(self):
return self.__id
@property
def name(self):
return self.__id.name
@property
def path(self):
return self.__path
@property
def variant(self):
return self.__pkginfo.get('variant', None)
@property
def requires(self):
return frozenset(self.__pkginfo.get('requires', list()))
@property
def version(self):
return self.__id.version
@property
def state_directory(self):
return self.__pkginfo.get('state_directory', False)
@property
def username(self):
return self.__pkginfo.get('username', None)
@property
def group(self):
return self.__pkginfo.get('group', None)
def __repr__(self):
return str(self.__id)
def expand_require(require: Union[str, dict]):
name = None
variant = None
if isinstance(require, str):
name = require
else:
assert isinstance(require, dict)
if 'name' not in require or 'variant' not in require:
raise ValidationError(
"When specifying a dependency in requires by dictionary to " +
"depend on a variant both the name of the package and the " +
"variant name must always be specified")
name = require['name']
variant = require['variant']
if PackageId.is_id(name):
raise ValidationError(
"ERROR: Specifying a dependency on '" + name + "', an exact" +
"package id isn't allowed. Dependencies may be specified by" +
"package name alone or package name + variant (to change the" +
"package variant).")
return (name, variant)
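# Illustrative inputs (package names are arbitrary): both accepted forms
# normalize to a (name, variant) tuple.
#
#   >>> expand_require('mesos')
#   ('mesos', None)
#   >>> expand_require({'name': 'mesos-config', 'variant': 'aws'})
#   ('mesos-config', 'aws')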
# Check that a set of packages is reasonable.
def validate_compatible(packages, roles):
# Every package name appears only once.
names = set()
ids = set()
tuples = set()
for package in packages:
if package.name in names:
raise ValidationError(
"Repeated name {0} in set of packages {1}".format(
package.name, ' '.join(map(lambda x: str(x.id), packages))))
if package.username is None and package.group is not None:
raise ValidationError("`group` cannot be used without `username`")
names.add(package.name)
ids.add(str(package.id))
tuples.add((package.name, package.variant))
# All requires are met.
# NOTE: Requires are given just to make it harder to accidentally
# break a cluster.
# Environment variables in packages, mapping from variable to package.
environment = dict()
sysctl_map = dict()
for package in packages:
# Check that all requirements of the package are met.
# Requirements can be specified on a package name or full version string.
for requirement in package.requires:
name, variant = expand_require(requirement)
if name not in names:
raise ValidationError(
("Package {} variant {} requires {} variant {} but that " +
"is not in the set of packages {}").format(
package.id,
package.variant,
name,
variant,
', '.join(str(x.id) for x in packages)))
# No repeated/conflicting environment variables with other packages as
# well as magic system environment variables.
for k, v in package.environment.items():
if k in reserved_env_vars:
raise ValidationError(
"{0} are reserved environment vars and cannot be specified in packages. Present in package {1}"
.format(", ".join(reserved_env_vars), package))
if k in environment:
raise ValidationError(
"Repeated environment variable {0}. In both packages {1} and {2}.".format(
                        k, environment[k], package))
environment[k] = package
# No conflicting sysctl values.
for service_name, sysctl_settings in package.sysctl.items():
for sysctl_var, sysctl_value in sysctl_settings.items():
if sysctl_var in sysctl_map and sysctl_map[sysctl_var] != sysctl_value:
raise ValueError(
"Conflicting sysctl setting {sysctl_var}={sysctl_value}"
" present in the service {service}".format(
sysctl_var=sysctl_var,
sysctl_value=sysctl_value,
service=service_name))
sysctl_map[sysctl_var] = sysctl_value
# TODO(cmaloney): More complete validation
# - There are no repeated file/folder in the well_known_dirs
# - Including the roles subfolders.
# - There is a base set of required package names (pkgpanda, mesos, config)
# - The config is for this specific type of host (master, slave)?
# TODO(cmaloney): Add a github fetcher, useful for grabbing config tarballs.
def requests_fetcher(base_url, id_str, target, work_dir):
assert base_url
assert type(id_str) == str
id = PackageId(id_str)
# TODO(cmaloney): That file:// urls are allowed in base_url is likely a security hole.
# TODO(cmaloney): Switch to mesos-fetcher or aci or something so
# all the logic can go away, we gain integrity checking, etc.
base_url = base_url.rstrip('/')
url = base_url + "/packages/{0}/{1}.tar.xz".format(id.name, id_str)
# TODO(cmaloney): Use a private tmp directory so there is no chance of a user
# intercepting the tarball + other validation data locally.
with tempfile.NamedTemporaryFile(suffix=".tar.xz") as file:
download(file.name, url, work_dir, rm_on_error=False)
extract_tarball(file.name, target)
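# Illustrative fetch URL construction (hypothetical values, not from the
# original source): with base_url="https://repo.example.com" and
# id_str="mesos--0.27.0" (package name "mesos"), requests_fetcher downloads
# "https://repo.example.com/packages/mesos/mesos--0.27.0.tar.xz" and extracts
# it into `target`.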
class Repository:
def __init__(self, path):
self.__path = os.path.abspath(path)
self.__packages = None
@property
def path(self):
return self.__path
def package_path(self, id):
return os.path.join(self.__path, id)
def get_ids(self, name):
# TODO(cmaloney): There is a lot of excess re-parsing here...
return list(pkg_id for pkg_id in self.list() if PackageId(pkg_id).name == name)
def has_package(self, id):
return id in self.list()
def list(self):
"""List the available packages in the repository.
A package is a folder which contains a pkginfo.json"""
if self.__packages is not None:
return self.__packages
packages = set()
if not os.path.exists(self.__path):
return packages
for id in os.listdir(self.__path):
if PackageId.is_id(id):
packages.add(id)
self.__packages = packages
return self.__packages
# Load the given package
def load(self, id: str):
# Validate the package id.
PackageId(id)
path = self.package_path(id)
if not os.path.exists(path):
raise PackageNotFound(id)
filename = os.path.join(path, "pkginfo.json")
try:
pkginfo = load_json(filename)
except OSError as ex:
raise PackageError("No / unreadable pkginfo.json in {0}: {1}".format(id, ex.strerror)) from ex
if not isinstance(pkginfo, dict):
raise PackageError("Usage should be a dictionary, not a {0}".format(type(pkginfo).__name__))
return Package(path, id, pkginfo)
def load_packages(self, ids: Iterable):
packages = set()
for id in ids:
packages.add(self.load(id))
return packages
def integrity_check(self):
# Check that all packages in the local repository have valid
# signatures, are up to date, all packages valid contents, etc.
raise NotImplementedError()
# Add the given package to the repository.
# If the package is already in the repository does a no-op and returns false.
# Returns true otherwise.
def add(self, fetcher, id, warn_added=True):
# Validate the package id.
PackageId(id)
# If the package already exists, return true
package_path = self.package_path(id)
if os.path.exists(package_path):
if warn_added:
print("Package already added.")
return False
# TODO(cmaloney): Supply a temporary directory to extract to
# Then swap that into place, preventing partially-extracted things from
# becoming an issue.
pkg_path = self.package_path(id)
# Appending _tmp so there is very little chance of us running into the
# rm of another package, since all our PackageID strings are SHA-1, so
# they never end with `_tmp`. `{sha}_tmp` is still a valid version
# number however so other code doing directory scans will be fine with
# the temp folders.
tmp_path = pkg_path + '_tmp'
# Cleanup artifacts (if any) laying around from previous partial
# package extractions.
check_call(['rm', '-rf', tmp_path])
fetcher(id, tmp_path)
os.rename(tmp_path, pkg_path)
return True
def remove(self, id):
path = self.package_path(id)
if not os.path.exists(path):
raise PackageNotFound(id)
shutil.rmtree(path)
class ConflictingFile(ValidationError):
def __init__(self, src, dest, ex):
super().__init__(ex)
self.src = src
self.dest = dest
self.ex = ex
# Create folders and symlink files inside the folders. Allows multiple
# packages to have the same folder and provide it publicly.
def symlink_tree(src, dest):
for name in os.listdir(src):
src_path = os.path.join(src, name)
dest_path = os.path.join(dest, name)
# Symlink files and symlinks directly. For directories make a
# real directory and symlink everything inside.
# NOTE: We could relax this and follow symlinks, but then we
# need to be careful about recursive filesystem layouts.
if os.path.isdir(src_path) and not os.path.islink(src_path):
if os.path.exists(dest_path):
# We can only merge a directory into a directory.
# We won't merge into a symlink directory because that could
# result in a package editing inside another package.
if not os.path.isdir(dest_path) and not os.path.islink(dest_path):
raise ValidationError(
"Can't merge a file `{0}` and directory (or symlink) `{1}` with the same name."
.format(src_path, dest_path))
else:
os.makedirs(dest_path)
            # Recurse into the directory, symlinking everything, so long as
            # the directory isn't itself a symlink.
symlink_tree(src_path, dest_path)
else:
try:
os.symlink(src_path, dest_path)
except FileNotFoundError as ex:
raise ConflictingFile(src_path, dest_path, ex) from ex
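# Illustrative usage of symlink_tree (hypothetical paths, not part of the
# original module): merging one package's public files into a shared
# well-known directory, e.g.
#   symlink_tree('/opt/pkg/mesos--0.27.0/bin', '/opt/install/bin.new')
# creates real directories under bin.new, symlinks each file back into the
# package, and raises ConflictingFile if two packages provide the same file.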
# Manages a systemd-sysusers user set.
# Can have users
class UserManagement:
"""Manages a systemd-sysusers configuration file / user set
add_user() can be called until `ensure_users_exist` is called.
get_uid() can only be called once `ensure_users_exist` is called.
This helps enforce the code pattern which is needed to build one big sysusers configuration file
and then create all the users / validate they all exist once. After that the users can be
referenced / used.
"""
def __init__(self, manage_users: bool, add_users: bool):
self._manage_users = manage_users
self._add_users = add_users
self._users = set()
@staticmethod
def validate_username(username):
if not re.match(username_regex, username):
raise ValidationError("Username must begin with `dcos_` and only have a-z and underscore after that")
@staticmethod
def validate_group(group):
# Empty group is allowed.
if not group:
return
UserManagement.validate_group_name(group)
try:
grp.getgrnam(group)
except KeyError:
raise ValidationError("Group {} does not exist on the system".format(group))
@staticmethod
def validate_group_name(group_name):
if not group_name:
return
if not re.match(linux_group_regex, group_name):
raise ValidationError("Group {} has invalid name, must match the following regex: {}".format(
group_name, linux_group_regex))
@staticmethod
def validate_user_group(username, group_name):
user = pwd.getpwnam(username)
if not group_name:
return
group = grp.getgrnam(group_name)
if user.pw_gid != group.gr_gid:
# check if the user is the right group, but the group is not primary.
if username in group.gr_mem:
return
            raise ValidationError(
                "User {} exists with primary group id {}, but it should be assigned to group {} with gid {}; "
                "please check `buildinfo.json`".format(username, user.pw_gid, group_name, group.gr_gid))
def add_user(self, username, groupname):
UserManagement.validate_username(username)
if not self._manage_users:
return
# Check if the user already exists and exit.
try:
UserManagement.validate_user_group(username, groupname)
self._users.add(username)
return
except KeyError as ex:
# Doesn't exist, fall through
pass
        # If we're not allowed to add new users, error out.
if not self._add_users:
raise ValidationError("User {} doesn't exist but is required by a DC/OS Component, and "
"automatic user addition is disabled".format(username))
# Add the user:
add_user_cmd = [
'useradd',
'--system',
'--home-dir', '/opt/mesosphere',
'--shell', '/sbin/nologin',
'-c', 'DCOS System User',
]
# A group matching the username will be created by the adduser command.
# Any other group that the user is added to needs to exist prior to executing the
# adduser command.
if groupname is not None and groupname != username:
UserManagement.validate_group(groupname)
add_user_cmd += [
'-g', groupname
]
add_user_cmd += [username]
try:
check_output(add_user_cmd)
self._users.add(username)
except CalledProcessError as ex:
raise ValidationError("User {} doesn't exist and couldn't be created because of: {}"
.format(username, ex.output))
def get_uid(self, username):
# Code should have already asserted all users exist, and be passing us
# a user we know about. This method only works for package users.
assert username in self._users
return pwd.getpwnam(username).pw_uid
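# Illustrative call pattern for UserManagement (hypothetical values, not part
# of the original module): users are added while building the new active set,
# then looked up once they are known to exist.
#   sysusers = UserManagement(manage_users=True, add_users=True)
#   sysusers.add_user('dcos_exhibitor', None)
#   uid = sysusers.get_uid('dcos_exhibitor')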
# A rooted install tree.
# Inside the install tree there will be all the well known folders and files as
# described in `docs/package_concepts.md`
class Install:
# TODO(cmaloney) This is way too many options for these call points. Most
# of these should be made so they can be removed (most are just for testing)
def __init__(
self,
root,
config_dir,
rooted_systemd,
manage_systemd,
block_systemd,
fake_path=False,
skip_systemd_dirs=False,
manage_users=False,
add_users=False,
manage_state_dir=False,
state_dir_root=STATE_DIR_ROOT):
assert type(rooted_systemd) == bool
assert type(fake_path) == bool
self.__root = os.path.abspath(root)
self.__config_dir = os.path.abspath(config_dir) if config_dir else None
if not skip_systemd_dirs:
if rooted_systemd:
self.__systemd_dir = "{}/dcos.target.wants".format(root)
else:
self.__systemd_dir = "/etc/systemd/system/dcos.target.wants"
self.__manage_systemd = manage_systemd
self.__block_systemd = block_systemd
# Look up the machine roles
self.__roles = []
if self.__config_dir:
self.__roles = if_exists(os.listdir, os.path.join(self.__config_dir, "roles"))
if self.__roles is None:
self.__roles = []
self.__well_known_dirs = ["bin", "etc", "include", "lib"]
if not skip_systemd_dirs:
self.__well_known_dirs.append(self.__systemd_dir)
self.__fake_path = fake_path
self.__skip_systemd_dirs = skip_systemd_dirs
self.__manage_users = manage_users
self.__add_users = add_users
self.__manage_state_dir = manage_state_dir
assert not state_dir_root.endswith('/')
self.__state_dir_root = state_dir_root
def _get_dcos_configuration_template(self):
return {"sysctl": {}}
def get_active_dir(self):
return os.path.join(self.__root, "active")
def get_active(self):
"""the active folder has symlinks to all the active packages.
Return the full package ids (The targets of the symlinks)."""
active_dir = self.get_active_dir()
if not os.path.exists(active_dir):
if os.path.exists(active_dir + ".old") or os.path.exists(active_dir + ".new"):
raise InstallError(
("Broken past deploy. See {0}.new for what the (potentially incomplete) new state should be " +
"and optionally {0}.old if it exists for the complete previous state.").format(active_dir))
else:
raise InstallError(
"Install directory {0} has no active folder. Has it been bootstrapped?".format(self.__root))
ids = set()
for name in os.listdir(active_dir):
package_path = os.path.realpath(os.path.join(active_dir, name))
# NOTE: We don't validate the id here because we want to be able to
# cope if there is something invalid in the current active dir.
ids.add(os.path.basename(package_path))
return ids
def has_flag(self, name):
return os.path.exists(self.get_config_filename(name))
def get_config_filename(self, name):
return os.path.join(self.__config_dir, name)
def _make_abs(self, name):
return os.path.abspath(os.path.join(self.__root, name))
def get_active_names(self):
return list(map(
self._make_abs,
self.__well_known_dirs + [
"environment",
"environment.export",
"active",
"active.buildinfo.full.json"
]))
# Builds new working directories for the new active set, then swaps it into place as atomically as possible.
def activate(self, packages):
# Ensure the new set is reasonable.
validate_compatible(packages, self.__roles)
# Build the absolute paths for the running config, new config location,
# and where to archive the config.
active_names = self.get_active_names()
active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))
new_names = [name + ".new" for name in active_names]
new_dirs = [name + ".new" for name in active_dirs]
old_names = [name + ".old" for name in active_names]
# Remove all pre-existing new and old directories
for name in chain(new_names, old_names):
if os.path.exists(name):
if os.path.isdir(name):
shutil.rmtree(name)
else:
os.remove(name)
# Make the directories for the new config
for name in new_dirs:
os.makedirs(name)
def symlink_all(src, dest):
if not os.path.isdir(src):
return
symlink_tree(src, dest)
# Set the new LD_LIBRARY_PATH, PATH.
env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
active_buildinfo_full = {}
dcos_service_configuration = self._get_dcos_configuration_template()
# Building up the set of users
sysusers = UserManagement(self.__manage_users, self.__add_users)
def _get_service_files(_dir):
service_files = []
for root, directories, filenames in os.walk(_dir):
for filename in filter(lambda name: name.endswith(".service"), filenames):
service_files.append(os.path.join(root, filename))
return service_files
def _get_service_names(_dir):
service_files = list(map(os.path.basename, _get_service_files(_dir)))
if not service_files:
return []
return list(map(lambda name: os.path.splitext(name)[0], service_files))
# Add the folders, config in each package.
for package in packages:
# Package folders
# NOTE: Since active is at the end of the folder list it will be
# removed by the zip. This is the desired behavior, since it will be
# populated later.
# Do the basename since some well known dirs are full paths (dcos.target.wants)
# while inside the packages they are always top level directories.
for new, dir_name in zip(new_dirs, self.__well_known_dirs):
dir_name = os.path.basename(dir_name)
pkg_dir = os.path.join(package.path, dir_name)
assert os.path.isabs(new)
assert os.path.isabs(pkg_dir)
try:
symlink_all(pkg_dir, new)
# Symlink all applicable role-based config
for role in self.__roles:
role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
symlink_all(role_dir, new)
except ConflictingFile as ex:
raise ValidationError("Two packages are trying to install the same file {0} or "
"two roles in the set of roles {1} are causing a package "
"to try activating multiple versions of the same file. "
"One of the package files is {2}.".format(ex.dest,
self.__roles,
ex.src))
# Add to the active folder
os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))
# Add to the environment and environment.export contents
env_contents += "# package: {0}\n".format(package.id)
env_export_contents += "# package: {0}\n".format(package.id)
for k, v in package.environment.items():
env_contents += "{0}={1}\n".format(k, v)
env_export_contents += "export {0}={1}\n".format(k, v)
env_contents += "\n"
env_export_contents += "\n"
# Add to the buildinfo
try:
active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
except FileNotFoundError:
# TODO(cmaloney): These only come from setup-packages. Should update
# setup-packages to add a buildinfo.full for those packages
active_buildinfo_full[package.name] = None
# NOTE: It is critical the state dir, the package name and the user name are all the
            # same. Otherwise on upgrades we might remove access to files by changing their owner
# to something incompatible. We survive the first upgrade because everything goes from
# root to specific users, and root can access all user files.
if package.username is not None:
sysusers.add_user(package.username, package.group)
# Ensure the state directory exists
# TODO(cmaloney): On upgrade take a snapshot?
if self.__manage_state_dir:
state_dir_path = self.__state_dir_root + '/' + package.name
if package.state_directory:
check_call(['mkdir', '-p', state_dir_path])
if package.username:
uid = sysusers.get_uid(package.username)
check_call(['chown', '-R', str(uid), state_dir_path])
if package.sysctl:
service_names = _get_service_names(package.path)
if not service_names:
raise ValueError("service name required for sysctl could not be determined for {package}".format(
package=package.id))
for service in service_names:
if service in package.sysctl:
dcos_service_configuration["sysctl"][service] = package.sysctl[service]
dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
write_json(dcos_service_configuration_file, dcos_service_configuration)
# Write out the new environment file.
new_env = self._make_abs("environment.new")
write_string(new_env, env_contents)
# Write out the new environment.export file
new_env_export = self._make_abs("environment.export.new")
write_string(new_env_export, env_export_contents)
# Write out the buildinfo of every active package
new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
write_json(new_buildinfo_meta, active_buildinfo_full)
self.swap_active(".new")
def recover_swap_active(self):
state_filename = self._make_abs("install_progress")
if not os.path.exists(state_filename):
return False, "Path does not exist: {}".format(state_filename)
state = load_json(state_filename)
extension = state['extension']
stage = state['stage']
if stage == 'archive':
self.swap_active(extension, True)
elif stage == 'move_new':
self.swap_active(extension, False)
else:
raise ValueError("Unexpected state to recover from {}".format(state))
return True, ""
# Does an atomic(ish) upgrade swap with support for recovering if
# only part of the swap happens before a reboot.
# TODO(cmaloney): Implement recovery properly.
def swap_active(self, extension, archive=True):
active_names = self.get_active_names()
state_filename = self._make_abs("install_progress")
systemd = None
if not self.__skip_systemd_dirs:
systemd = Systemd(self._make_abs(self.__systemd_dir), self.__manage_systemd, self.__block_systemd)
# Ensure all the new active files exist
for active in active_names:
if not os.path.exists(active + extension):
raise ValueError(
"Unable to swap active packages. Needed file {} doesn't exist.".format(active + extension))
# Record the state (atomically) on the filesystem so that if there is a
# hard/fast fail at any point the activate swap can continue.
def record_state(state):
# Atomically write all the state to disk, swap into place.
with open(state_filename + ".new", "w+") as f:
state['extension'] = extension
json.dump(state, f)
f.flush()
os.fsync(f.fileno())
os.rename(state_filename + ".new", state_filename)
# TODO(pyronicide): systemd requires units to be both in the
# root directory (/etc/systemd/system) *and* (for starting) in a
# specific wants directory (dcos.target.wants). If they're not in both
# places, units randomly move into a `not-loaded` state (which makes
# for sad pandas). This treats dcos.target.wants as the single source
# of truth and just sets things up locally.
def manage_systemd_linking(method):
base_systemd = os.path.normpath(
os.path.join(self._make_abs(self.__systemd_dir), ".."))
wants_path = self._make_abs(self.__systemd_dir)
if not os.path.exists(wants_path):
return
for unit_name in os.listdir(wants_path):
if unit_name in RESERVED_UNIT_NAMES:
raise Exception(
"Stopping install. " +
"Reserved name encountered - {}.".format(unit_name))
real_path = os.path.realpath(
os.path.join(wants_path, unit_name))
try:
os.remove(os.path.join(base_systemd, unit_name))
except FileNotFoundError:
# This is going from an old to new version of DC/OS.
pass
if method == "setup":
os.symlink(real_path, os.path.join(base_systemd, unit_name))
if archive:
# TODO(cmaloney): stop all systemd services in dcos.target.wants
record_state({"stage": "archive"})
# Stop all systemd services
if not self.__skip_systemd_dirs:
systemd.stop_all()
manage_systemd_linking("cleanup")
# Archive the current config.
for active in active_names:
old_path = active + ".old"
if os.path.exists(active):
os.rename(active, old_path)
record_state({"stage": "move_new"})
# Move new / with extension into active.
# TODO(cmaloney): Capture any failures here and roll-back if possible.
# TODO(cmaloney): Alert for any failures here.
for active in active_names:
new_path = active + extension
os.rename(new_path, active)
if not self.__skip_systemd_dirs:
manage_systemd_linking("setup")
# All done with what we need to redo if host restarts.
os.remove(state_filename)
@property
def manage_systemd(self):
return self.__manage_systemd
@property
def systemd_dir(self):
return self.__systemd_dir
@property
def root(self):
return self.__root
| apache-2.0 |
RhysU/suzerain | postproc/sgr2suz.py | 1 | 5052 | #!/usr/bin/env python
"""Usage: sgr2suz.py --sgr_file=<sgr_file.dat> --suzerain_file=<suzerain_file.h5>
Interpolate solution from a slow growth RANS simulation to a suzerain 'reacting' file.
Input data must be in text format with the following column layout:
{y, rho_diluter, rho_s, rho_u, rho_v, rho_E}, with rho_diluter the diluter density,
rho_s the species densities (s=1, Ns).
Options:
-h --help This help message.
--sgr_file= Text file from sgr.
--suzerain_file= HDF5 file from suzerain with proper metadata.
"""
import sys
import getopt
import h5py
import numpy as np
import gb
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
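# Minimal Usage exception used by main() below for reporting command-line
# errors; an assumed helper following the standard getopt main() boilerplate,
# since the class is referenced later but not defined in this listing.
class Usage(Exception):
    def __init__(self, msg):
        self.msg = msg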
def getsgr2suz(datfile, hdf5file):
# Load sgr file
data = np.loadtxt(datfile, delimiter=' ')
print "Data from sgr loaded"
# Grab sgr data
# Number of field variables from sgr data
nvar = data.shape[1] - 1
# Number of grid points in y from sgr
ny_sgr = data.shape[0]
# Number of species from sgr
ns = nvar - 3 # rho_u, rho_v, rhoE
# Declare convenience indices for rhou, rhov, rhoE
irhou = nvar - 2
irhov = nvar - 1
irhoE = nvar
# Declare variables for sgr fields
y_sgr = np.array(data[:,0 ]).transpose().reshape(ny_sgr,1)
rho = np.zeros((ny_sgr,1))
rho_s = np.zeros((ny_sgr,ns))
for i in xrange(1,ns+1):
rho_s[:,i-1] = data[:,i]
rho += np.array(data[:,i]).transpose().reshape(ny_sgr,1)
rho_u = np.array(data[:,irhou]).transpose().reshape(ny_sgr,1)
rho_v = np.array(data[:,irhov]).transpose().reshape(ny_sgr,1)
rho_E = np.array(data[:,irhoE]).transpose().reshape(ny_sgr,1)
# Load suzerain file
f = h5py.File(hdf5file,'r+')
print "Data from suzerain loaded"
# Grab suzerain mesh info
Nyf = f['Ny'].value
xf = f['collocation_points_x'].value
yf = f['collocation_points_y'].value
zf = f['collocation_points_z'].value
# Grab number of species
Ns=f['antioch_constitutive_data'].attrs['Ns'][0]
# Grab relevant metadata
lower_v =f['lower_v' ].value
lower_cs=f['lower_cs'].value
upper_cs=f['upper_cs'].value
# Grab species names
sname= np.chararray(Ns, itemsize=5)
for s in xrange(0,Ns):
sname[s] = f['antioch_constitutive_data'].attrs['Species_'+str(s)]
sname[s] = sname[s].strip(' ')
# Read suzerain data
frho = f['rho'].value
frhou = f['rho_u'].value
frhov = f['rho_v'].value
frhow = f['rho_w'].value
frhoE = f['rho_E'].value
# Interpolate data
frho [:,0,0] = np.interp(yf[:],y_sgr[:,0],rho [:,0])
frhou[:,0,0] = np.interp(yf[:],y_sgr[:,0],rho_u [:,0])
frhov[:,0,0] = np.interp(yf[:],y_sgr[:,0],rho_v [:,0])
frhoE[:,0,0] = np.interp(yf[:],y_sgr[:,0],rho_E [:,0])
# Write fields to suzerain file
f['rho' ][()] = frho
f['rho_u'][()] = frhou
f['rho_v'][()] = frhov
f['rho_w'][()] = 0
f['rho_E'][()] = frhoE
# Flow metadata
lower_v[0] = frhov[0,0,0]/ frho[0,0,0]
f['lower_v'][()] = lower_v
# Species
frhos = f['rho'].value ## Placeholder for species field
    # NOTE: s=0 is the diluter, skip writing it, but compute
# lower and upper concentrations
frhos [:,0,0] = np.interp(yf[:],y_sgr[:,0],rho_s [:,0])
lower_cs[0] = frhos[0,0,0]/ frho[0,0,0]
upper_cs[0] = frhos[Nyf-1,0,0]/ frho[Nyf-1,0,0]
for s in xrange(1,Ns):
rhos_key = 'rho_' + sname[s]
frhos [:,0,0] = np.interp(yf[:],y_sgr[:,0],rho_s [:,s])
f[rhos_key.strip(' ')][()] = frhos
lower_cs[s] = frhos[0,0,0]/ frho[0,0,0]
upper_cs[s] = frhos[Nyf-1,0,0]/ frho[Nyf-1,0,0]
# Species metadata
f['lower_cs'][()] = lower_cs
f['upper_cs'][()] = upper_cs
# Close suz file
f.close()
print "Data written to suzerain file"
def main(argv=None):
# Permit interactive use
if argv is None:
argv = sys.argv
# Parse and check incoming command line arguments
try:
try:
opts, args = getopt.getopt(argv[1:], "hn",
[ "help"
, "sgr_file="
, "suzerain_file="
])
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
if o in ("--sgr_file"):
sgr_file = a
print "sgr file is : ", sgr_file
if o in ("--suzerain_file"):
suzerain_file = a
print "suzerain file is : ", suzerain_file
if len(args) > 0:
print >>sys.stderr, "Incorrect number of arguments. See --help."
return 2
except Usage as err:
print >>sys.stderr, err.msg
return 2
# Interpolate data form sgr to suz file
getsgr2suz(sgr_file, suzerain_file)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
    # caches of rendered output (rgba arrays, grey arrays, postscript)
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                    if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
                raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
                raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| agpl-3.0 |
wanggang3333/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
jmargeta/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 4 | 1912 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD Style.
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import pylab as pl
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import Ward
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = Ward(n_clusters=n_clusters, connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
for l in range(n_clusters):
pl.contour(label == l, contours=1,
colors=[pl.cm.spectral(l / float(n_clusters)), ])
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
nhejazi/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 77 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are select for all time
point. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
# #############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
GGoussar/scikit-image | doc/examples/filters/plot_restoration.py | 9 | 1987 | # -*- coding: utf-8 -*-
"""
=====================
Image Deconvolution
=====================
In this example, we deconvolve a noisy version of an image using Wiener
and unsupervised Wiener algorithms. These algorithms are based on
linear models that cannot restore sharp edges as well as non-linear
methods (like TV restoration), but they are much faster.
Wiener filter
-------------
The inverse filter is based on the PSF (Point Spread Function),
a prior regularisation (penalisation of high frequencies) and the
tradeoff between the data and prior adequacy. The regularization
parameter must be hand tuned.
Unsupervised Wiener
-------------------
This algorithm has self-tuned regularisation parameters based on
data learning. This is not common and is based on the following
publication [1]_. The algorithm relies on an iterative Gibbs sampler that
alternately draws samples from the posterior conditional law of the image,
the noise power and the image frequency power.
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import color, data, restoration
astro = color.rgb2gray(data.astronaut())
from scipy.signal import convolve2d as conv2
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
astro += 0.1 * astro.std() * np.random.standard_normal(astro.shape)
deconvolved, _ = restoration.unsupervised_wiener(astro, psf)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5),
sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
plt.gray()
ax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())
ax[0].axis('off')
ax[0].set_title('Data')
ax[1].imshow(deconvolved)
ax[1].axis('off')
ax[1].set_title('Self tuned restoration')
fig.tight_layout()
plt.show()
| bsd-3-clause |
h2educ/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
NLeSC/SalientDetector-python | salientregions/helpers.py | 1 | 19824 | '''
Module for helper functions, e.g. image and regions visualization, region to ellipse conversion, loading MAT files, array/vector difference etc.
'''
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from numpy import linalg as LA
import cv2
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import math
import six
from six.moves import range
def show_image(img, title=None):
"""Display the image.
When a key is pressed, the window is closed
Parameters
----------
img : numpy array
image
title : str, optional
Title of the image
"""
fig = plt.figure()
plt.axis("off")
if len(img.shape) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_GRAY2RGB))
    if title is not None:
        fig.canvas.set_window_title(title)
        fig.suptitle(title)
plt.gcf().canvas.mpl_connect('key_press_event',
lambda event: plt.close(event.canvas.figure))
plt.show()
# colormap bgr
colormap = {'holes': [255, 0, 0], # BLUE
'islands': [0, 255, 255], # YELLOW
'indentations': [0, 255, 0], # GREEN
'protrusions': [0, 0, 255] # RED
}
def visualize_elements(img, regions=None,
holes=None, islands=None,
indentations=None, protrusions=None,
visualize=True,
title='salient regions'):
"""Display the image with the salient regions provided.
Parameters
----------
img : numpy array
image
regions : dict
dictionary with the regions to show
holes : numpy array
Binary mask of the holes, to display in blue
islands : numpy array
Binary mask of the islands, to display in yellow
indentations : numpy array
Binary mask of the indentations, to display in green
protrusions : numpy array
Binary mask of the protrusions, to display in red
visualize: bool, optional
visualizations flag
    title : str, optional
        title of the visualization window
Returns
----------
img_to_show : numpy array
image with the colored regions
"""
# if the image is grayscale, make it BGR:
if len(img.shape) == 2:
img_to_show = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else:
img_to_show = img.copy()
if regions is not None:
holes = regions.get("holes", None)
islands = regions.get("islands", None)
indentations = regions.get("indentations", None)
protrusions = regions.get("protrusions", None)
if holes is not None:
img_to_show[[holes > 0]] = colormap['holes']
if islands is not None:
img_to_show[[islands > 0]] = colormap['islands']
if indentations is not None:
img_to_show[[indentations > 0]] = colormap['indentations']
if protrusions is not None:
img_to_show[[protrusions > 0]] = colormap['protrusions']
if visualize:
show_image(img_to_show, title=title)
return img_to_show
def visualize_elements_ellipses(img, features,
visualize=True,
title='salient regions'):
"""Display the image with the salient regions provided.
Parameters
----------
img : numpy array
image
features : dict
dictionary with the ellipse features of the regions to show
visualize: bool, optional
visualizations flag
    title : str, optional
        title of the visualization window
Returns
----------
img_to_show : numpy array
image with the colored regions
"""
if len(img.shape) == 2:
img_to_show = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else:
img_to_show = img.copy()
for region_type in features.keys():
img_to_show = visualize_ellipses(img_to_show, features[region_type],
colormap[region_type], visualize=False)
if visualize:
show_image(img_to_show, title=title)
return img_to_show
def read_matfile(filename, visualize=True):
"""Read a matfile with the binary masks for the salient regions.
Returns the masks with 0/255 values for the 4 salient types
Parameters
----------
filename: str
Path to the mat file
visualize: bool, optional
option for visualizing the process
Returns
----------
holes: numpy array
Binary image with holes as foreground
islands: numpy array
Binary image with islands as foreground
protrusions: numpy array
Binary image with protrusions as foreground
indentations: numpy array
Binary image with indentations as foreground
"""
matfile = sio.loadmat(filename)
regions = matfile['saliency_masks'] * 255
holes = regions[:,:, 0]
islands = regions[:,:, 1]
indentations = regions[:,:, 2]
protrusions = regions[:,:, 3]
if visualize:
show_image(holes, 'holes')
show_image(islands, 'islands')
show_image(indentations, 'indentations')
show_image(protrusions, 'protrusions')
return holes, islands, indentations, protrusions
def image_diff(img1, img2, visualize=True):
"""Compares two images and shows the difference.
Useful for testing purposes.
Parameters
----------
img1: numpy array
first image to compare
img2: numpy array
second image to compare
visualize: bool, optional
option for visualizing the process
Returns
----------
is_same: bool
True if all pixels of the two images are equal
"""
if visualize:
show_image(cv2.bitwise_xor(img1, img2), 'Difference between images')
return np.all(img1 == img2)
def array_diff(arr1, arr2, rtol=1e-05, atol=1e-08):
"""Compares two arrays. Useful for testing purposes.
Parameters
----------
arr1: 2-dimensional numpy, first array to compare
arr2: 2-dimensional numpy, second array to compare
Returns
----------
is_close: bool
True if elemetns of the two arrays are close within the defaults tolerance
(see numpy.allclose documentaiton for tolerance values)
"""
return np.allclose(arr1, arr2, rtol, atol)
def standard2poly_ellipse(half_major_axis, half_minor_axis, theta):
""" Conversion of elliptic parameters to polynomial coefficients.
Parameters
----------
half_major_axis: float
Half of the length of the ellipse's major axis
half_minor_axis: float
Half of the length of the ellipse's minor axis
theta: float
The ellipse orientation angle (radians) between the major and the x axis
Returns
----------
A, B, C: floats
        The coefficients of the polynomial equation of an ellipse :math:`Ax^2 + 2Bxy + Cy^2 = 1`
"""
# trigonometric functions
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sin_cos_theta = sin_theta * cos_theta
# squares
a_sq = half_major_axis * half_major_axis
b_sq = half_minor_axis * half_minor_axis
sin_theta_sq = sin_theta * sin_theta
cos_theta_sq = cos_theta * cos_theta
# common denominator
denom = a_sq * b_sq
# polynomial coefficients
A = (b_sq * cos_theta_sq + a_sq * sin_theta_sq) / denom
B = ((b_sq - a_sq) * sin_cos_theta) / denom
C = (b_sq * sin_theta_sq + a_sq * cos_theta_sq) / denom
return A, B, C
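# Worked example (added for illustration): for a circle of radius r, i.e.
# half_major_axis = half_minor_axis = r, the formulas above give
# A = C = 1/r**2 and B = 0, so Ax^2 + Bxy + Cy^2 = 1 reduces to
# x^2/r**2 + y^2/r**2 = 1 independently of theta, as expected.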
def poly2standard_ellipse(A, B, C):
""" Conversion of elliptic polynomial coefficients to standard parameters.
Parameters
----------
A, B, C: floats
The coefficients of the polynomial equation of an ellipse :math:`Ax^2 + Bxy + Cy^2 = 1`
Returns
----------
half_major_axis: float
Half of the length of the ellipse's major axis
half_minor_axis: float
Half of the length of the ellipse's minor axis
theta: float
The ellipse orientation angle (radians) between the major and the x axis
NOTE
------
WARNING: The conversion might be correct only if the resulting angle is between 0 and pi/2!
"""
# construct a matrix from the polynomial coefficients
M = np.array([[A, B], [B, C]])
# find the eigenvalues
evals = LA.eigh(M)[0]
order = evals.argsort()[::-1]
evals = evals[order]
e_min = evals[-1]
e_max = evals[0]
# derive the angle directly from the coefficients
if B == 0:
if A < C:
theta = 0
else:
theta = np.pi/2
else:
if A < C:
theta = 0.5*np.arctan(2*B/(A-C))
else:
theta = np.pi/2 + 0.5*np.arctan(2*B/(A-C))
# axis lengths
half_major_axis = 1/np.sqrt(e_min)
half_minor_axis = 1/np.sqrt(e_max)
return half_major_axis, half_minor_axis, theta
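# A small round-trip sketch (added for illustration, not part of the original module):
# converting standard ellipse parameters to polynomial coefficients and back should
# recover the inputs as long as half_major_axis >= half_minor_axis and 0 < theta < pi/2
# (see the warning above). The helper name and sample values are illustrative only.
def _example_ellipse_parameter_roundtrip():
    a, b, theta = 2.0, 1.0, 0.5                    # half axes and angle in radians
    A, B, C = standard2poly_ellipse(a, b, theta)   # approx. (0.422, -0.316, 0.828)
    a2, b2, theta2 = poly2standard_ellipse(A, B, C)
    return np.allclose([a2, b2, theta2], [a, b, theta])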
def binary_mask2ellipse_features_single(binary_mask, connectivity=4, saliency_type=1, min_square=False):
""" Conversion of a single saliency type of binary regions to ellipse features.
Parameters
----------
binary_mask: 2-D numpy array
Binary mask of the detected salient regions of the given saliency type
connectivity: int
Neighborhood connectivity
saliency_type: int
Type of salient regions. The code is:
1: holes
2: islands
3: indentations
4: protrusions
    min_square: bool, optional
        whether to fit ellipses with least-squares fitting (cv2.fitEllipse);
        by default the minimum-area bounding rotated rectangle (cv2.minAreaRect) is used
Returns
----------
num_regions: int
        The number of salient regions of saliency_type
features_standard: numpy array
array with standard ellipse features for each of the ellipses for a given saliency type
features_poly: numpy array
array with polynomial ellipse features for each of the ellipses for a given saliency type
Notes
----------
Every row in the resulting feature_standard array corresponds to a single
region/ellipse and is of format:
``x0 y0 a b angle saliency_type`` ,
    where ``(x0,y0)`` are the coordinates of the ellipse centroid and ``a``, ``b`` and ``angle`` (in radians)
    are the standard parameters from the ellipse equation:
    :math:`(x*cos(angle) + y*sin(angle))^2/a^2 + (x*sin(angle) - y*cos(angle))^2/b^2 = 1`
Every row in the resulting feature_poly array corresponds to a single
region/ellipse and is of format:
``x0 y0 A B C saliency_type`` ,
where ``(x0,y0)`` are the coordinates of the ellipse centroid and ``A``, ``B`` and ``C``
are the polynomial coefficients from the ellipse equation :math:`Ax^2 + Bxy + Cy^2 = 1`.
"""
# num_regions, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, connectivity=connectivity)
binary_mask2 = binary_mask.copy()
_, contours, hierarchy = cv2.findContours(
binary_mask2, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
indices_regions = np.where(hierarchy[0,:, 3] == -1)[0]
num_regions = len(indices_regions)
features_standard = np.zeros((num_regions, 6), float)
features_poly = np.zeros((num_regions, 6), float)
i = 0
for index_regions in indices_regions:
cnt = contours[index_regions]
# fit an ellipse to the contour
if min_square:
# (x, y), (ma, MA), angle = cv2.fitEllipse(cnt)
ellipse = cv2.fitEllipse(cnt)
# center, axis_length and orientation of ellipse
(center, axes, angle_deg) = ellipse
# center of the ellipse
(x, y) = center
# length of MAJOR and minor axis
MA = max(axes)
ma = min(axes)
else:
(x, y), (ma, MA), angle_deg = cv2.minAreaRect(cnt)
#(center, axes, angle_deg) = cv2.minAreaRect(cnt)
# ellipse parameters
a = np.fix(MA / 2)
b = np.fix(ma / 2)
if ((a > 0) and (b > 0)):
x0 = x
y0 = y
if (angle_deg == 0):
angle_deg = 180
# angle_rad_manual = angle_deg * math.pi / 180
angle_rad = math.radians(angle_deg)
# compute the elliptic polynomial coefficients, aka features
[A, B, C] = standard2poly_ellipse(a, b, -angle_rad)
#[A, B, C] = standard2poly_ellipse(a, b, angle_rad)
features_poly[i, ] = ([x0, y0, A, B, C, saliency_type])
features_standard[i, ] = ([x, y, a, b, angle_rad, saliency_type])
else:
# We still output the ellipse as NaN
            features_poly[i, ] = ([np.nan, np.nan, np.nan, np.nan, np.nan, saliency_type])
# standard parameters
# features_standard[i, ] = ([x, y, a, b, angle_deg, saliency_type])
i += 1
return num_regions, features_standard, features_poly
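# A usage sketch (added for illustration, not part of the original module): extract
# ellipse descriptors from a synthetic binary mask containing a single filled circle.
# The helper name is illustrative only; it relies on the module-level cv2/np imports.
def _example_single_mask_to_ellipses():
    mask = np.zeros((100, 100), dtype=np.uint8)
    cv2.circle(mask, (50, 50), 20, 255, -1)   # one filled region
    num, feat_standard, feat_poly = binary_mask2ellipse_features_single(
        mask, connectivity=4, saliency_type=1)
    # num == 1; feat_standard[0] is [x0, y0, a, b, angle_rad, saliency_type],
    # with (x0, y0) close to (50, 50) and both half axes close to 20.
    return num, feat_standard, feat_poly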
def visualize_ellipses(img, features, color=(0, 0, 255), visualize=True):
""" Visualise ellipses in an image
Parameters
----------
    img: numpy array
        image to show the ellipses on
features: numpy array
standard ellipse features for each of the ellipses
color: tuple of ints, optional
color to show the ellipses
visualize: bool, optional
visualizations flag
Returns
----------
img_to_show: numpy array
image with the colored ellipses
"""
# if the image is grayscale, make it BGR:
if len(img.shape) == 2:
img_to_show = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else:
img_to_show = img.copy()
for (x, y, a, b, angle_rad, _) in features:
# for (x, y, a, b, angle_deg, _) in features:
angle_deg = math.degrees(angle_rad)
img_to_show = cv2.ellipse(img_to_show, (int(x), int(y)), (int(b), int(a)), int(angle_deg), 0, 360, color, 2)
# img_to_show = cv2.ellipse(img_to_show, (int(x), int(y)), (int(a), int(b)), int(angle_deg), 0, 360, color, 2)
if visualize:
show_image(img_to_show)
return img_to_show
def binary_mask2ellipse_features(regions, connectivity=4, min_square=False):
""" Conversion of all types of regions to ellipse features.
Parameters
----------
regions: dict
Dict of binary masks of the detected salient regions
connectivity: int, optional
Neighborhood connectivity
    min_square: bool, optional
        whether to fit ellipses with least-squares fitting (cv2.fitEllipse);
        by default the minimum-area bounding rotated rectangle (cv2.minAreaRect) is used
Returns
----------
num_regions: dict
        The number of salient regions for each saliency_type
features_standard: dict
dictionary with standard ellipse features for each of the ellipses
features_poly: dict
dictionary with polynomial ellipse features for each of the ellipses
Note
----------
    The keys of the dictionaries are the saliency types.
Every row in the array per key of features_standard corresponds to a single
region/ellipse and is of format:
``x0 y0 a b angle saliency_type`` ,
    where ``(x0,y0)`` are the coordinates of the ellipse centroid and ``a``, ``b`` and ``angle`` (in radians)
    are the standard parameters from the ellipse equation:
    :math:`(x*cos(angle) + y*sin(angle))^2/a^2 + (x*sin(angle) - y*cos(angle))^2/b^2 = 1`
Every row in the array per key of features_poly corresponds to a single
region/ellipse and is of format:
``x0 y0 A B C saliency_type`` ,
where ``(x0,y0)`` are the coordinates of the ellipse centroid and ``A``, ``B`` and ``C``
are the polynomial coefficients from the ellipse equation :math:`Ax^2 + Bxy + Cy^2 = 1`.
"""
region2int = {"holes": 1,
"islands":2,
"indentations": 3,
"protrusions": 4}
num_regions = {}
features_standard = {}
features_poly = {}
for saltype in regions.keys():
# print "Saliency type: ", saltype
num_regions_s, features_standard_s, features_poly_s = binary_mask2ellipse_features_single(regions[saltype],
connectivity=connectivity, saliency_type=region2int[saltype], min_square=min_square)
num_regions[saltype] = num_regions_s
# print "Number of regions for that saliency type: ", num_regions_s
features_standard[saltype] = features_standard_s
features_poly[saltype] = features_poly_s
return num_regions, features_standard, features_poly
def save_ellipse_features2file(num_regions, features, filename):
""" Saving the ellipse features (polynomial or standard) to file.
Parameters
----------
num_regions: dict
        The number of salient regions for each saliency type
features: dict
dictionary with ellipse features for each of the ellipses
filename: str
the filename where to save the features
Returns
--------
total_num_regions: int
        the total number of salient regions over all saliency types
NOTES
-------
see load_ellipse_features_from_file
"""
total_num_regions = 0
# open the file in writing mode
f = open(filename, 'w')
for saltype in num_regions.keys():
total_num_regions += num_regions[saltype]
f.write('0 \n');
f.write(str(total_num_regions))
f.write('\n');
for saltype in num_regions.keys():
features_s = features[saltype]
# print "saliency type: ", saltype
# write into the file per ellipse
# for ellipse_entry in features_poly_s: #
for n in range(num_regions[saltype]):
ellipse_entry = features_s[n,:]
# print "n: features", n,":", ellipse_entry
for e in ellipse_entry:
f.write(str(e))
f.write(' ')
f.write('\n')
# close the file
f.close()
return total_num_regions
def load_ellipse_features_from_file(filename):
""" Load elipse features (polynomial or standard) from, file.
Parameters
----------
filename: str
the filename where to load the features from
Returns
--------
total_num_regions: int
        the total number of salient regions over all saliency types
    num_regions: dict
        The number of salient regions for each saliency type
features: dict
dictionary with ellipse features for each of the ellipses
NOTES
-------
see save_ellipse_features2file
"""
# initializations
region2int = {"holes": 1,
"islands":2,
"indentations": 3,
"protrusions": 4}
int2region = {v: k for (k, v) in six.iteritems(region2int)}
keys = list(region2int.keys())
total_num_regions = 0
num_regions = {k: 0 for k in keys}
features_lists = {k: [] for k in keys}
    # open the file in reading mode
f = open(filename, 'r')
# skip the first line (contains a 0)
f.readline()
# next one is the total number of regions
total_num_regions = int(f.readline())
    # read off the features line by line
for i in range(total_num_regions):
line = f.readline()
# get the last element- the type
line_numbers = line.split()
sal_type = int2region[int(float(line_numbers[-1]))]
# make the string list- to a float list
feature_list = [float(l) for l in line_numbers]
features_lists[sal_type].append(feature_list)
num_regions[sal_type] += 1
# close the file
f.close()
# make numpy arrays from the lists
features = {k: np.array(v) for (k, v) in six.iteritems(features_lists)}
return total_num_regions, num_regions, features
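# A round-trip sketch (added for illustration, not part of the original module): write a
# single hand-made 'holes' feature row with save_ellipse_features2file and read it back
# with load_ellipse_features_from_file. The helper name and values are illustrative only.
def _example_ellipse_feature_file_roundtrip():
    import os
    import tempfile
    num_regions = {"holes": 1}
    features = {"holes": np.array([[50.0, 50.0, 20.0, 10.0, 0.3, 1.0]])}
    fd, path = tempfile.mkstemp(suffix='.txt')
    os.close(fd)
    try:
        total_saved = save_ellipse_features2file(num_regions, features, path)
        total_loaded, num_loaded, features_loaded = load_ellipse_features_from_file(path)
        return (total_saved == total_loaded == 1 and
                np.allclose(features["holes"], features_loaded["holes"]))
    finally:
        os.remove(path)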
| apache-2.0 |
materialsproject/pymatgen | pymatgen/analysis/tests/test_phase_diagram.py | 4 | 31734 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from numbers import Number
from pathlib import Path
from collections import OrderedDict
import numpy as np
from pymatgen.analysis.phase_diagram import (
CompoundPhaseDiagram,
GrandPotentialPhaseDiagram,
GrandPotPDEntry,
PDEntry,
PDPlotter,
PhaseDiagram,
ReactionDiagram,
TransformedPDEntry,
tet_coord,
triangular_coord,
uniquelines,
BasePhaseDiagram,
)
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import DummySpecies, Element
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.entry_tools import EntrySet
module_dir = Path(__file__).absolute().parent
class PDEntryTest(unittest.TestCase):
def setUp(self):
comp = Composition("LiFeO2")
self.entry = PDEntry(comp, 53)
self.gpentry = GrandPotPDEntry(self.entry, {Element("O"): 1.5})
def test_get_energy(self):
self.assertEqual(self.entry.energy, 53, "Wrong energy!")
self.assertEqual(self.gpentry.energy, 50, "Wrong energy!")
def test_get_chemical_energy(self):
self.assertEqual(self.gpentry.chemical_energy, 3, "Wrong energy!")
def test_get_energy_per_atom(self):
self.assertEqual(self.entry.energy_per_atom, 53.0 / 4, "Wrong energy per atom!")
self.assertEqual(self.gpentry.energy_per_atom, 50.0 / 2, "Wrong energy per atom!")
def test_get_name(self):
self.assertEqual(self.entry.name, "LiFeO2", "Wrong name!")
self.assertEqual(self.gpentry.name, "LiFeO2", "Wrong name!")
def test_get_composition(self):
comp = self.entry.composition
expected_comp = Composition("LiFeO2")
self.assertEqual(comp, expected_comp, "Wrong composition!")
comp = self.gpentry.composition
expected_comp = Composition("LiFe")
self.assertEqual(comp, expected_comp, "Wrong composition!")
def test_is_element(self):
self.assertFalse(self.entry.is_element)
self.assertFalse(self.gpentry.is_element)
def test_to_from_dict(self):
d = self.entry.as_dict()
gpd = self.gpentry.as_dict()
entry = PDEntry.from_dict(d)
self.assertEqual(entry.name, "LiFeO2", "Wrong name!")
self.assertEqual(entry.energy_per_atom, 53.0 / 4)
gpentry = GrandPotPDEntry.from_dict(gpd)
self.assertEqual(gpentry.name, "LiFeO2", "Wrong name!")
self.assertEqual(gpentry.energy_per_atom, 50.0 / 2)
d_anon = d.copy()
del d_anon["name"]
try:
entry = PDEntry.from_dict(d_anon)
except KeyError:
self.fail("Should not need to supply name!")
def test_str(self):
self.assertIsNotNone(str(self.entry))
def test_read_csv(self):
entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.assertEqual(entries.chemsys, {"Li", "Fe", "O"}, "Wrong elements!")
self.assertEqual(len(entries), 490, "Wrong number of entries!")
class TransformedPDEntryTest(unittest.TestCase):
def setUp(self):
comp = Composition("LiFeO2")
entry = PDEntry(comp, 53)
terminal_compositions = ["Li2O", "FeO", "LiO8"]
terminal_compositions = [Composition(c) for c in terminal_compositions]
sp_mapping = OrderedDict()
for i, comp in enumerate(terminal_compositions):
sp_mapping[comp] = DummySpecies("X" + chr(102 + i))
self.transformed_entry = TransformedPDEntry(entry, sp_mapping)
def test_get_energy(self):
self.assertEqual(self.transformed_entry.energy, 53, "Wrong energy!")
self.assertAlmostEqual(self.transformed_entry.original_entry.energy, 53.0, 11)
def test_get_energy_per_atom(self):
self.assertAlmostEqual(self.transformed_entry.energy_per_atom, 53.0 / (23 / 15), 11)
def test_get_name(self):
self.assertEqual(self.transformed_entry.name, "LiFeO2", "Wrong name!")
def test_get_composition(self):
comp = self.transformed_entry.composition
expected_comp = Composition({DummySpecies("Xf"): 14 / 30, DummySpecies("Xg"): 1.0, DummySpecies("Xh"): 2 / 30})
self.assertEqual(comp, expected_comp, "Wrong composition!")
def test_is_element(self):
self.assertFalse(self.transformed_entry.is_element)
def test_to_from_dict(self):
d = self.transformed_entry.as_dict()
entry = TransformedPDEntry.from_dict(d)
self.assertEqual(entry.name, "LiFeO2", "Wrong name!")
self.assertAlmostEqual(entry.energy_per_atom, 53.0 / (23 / 15), 11)
def test_str(self):
self.assertIsNotNone(str(self.transformed_entry))
def test_normalize(self):
norm_entry = self.transformed_entry.normalize(mode="atom")
expected_comp = Composition(
{DummySpecies("Xf"): 7 / 23, DummySpecies("Xg"): 15 / 23, DummySpecies("Xh"): 1 / 23}
)
self.assertEqual(norm_entry.composition, expected_comp, "Wrong composition!")
class PhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = PhaseDiagram(self.entries)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
# Ensure that a bad set of entries raises a PD error. Remove all Li
# from self.entries.
entries = filter(
lambda e: (not e.composition.is_element) or e.composition.elements[0] != Element("Li"),
self.entries,
)
self.assertRaises(ValueError, PhaseDiagram, entries)
def test_dim1(self):
        # Ensure that dim 1 PDs can be generated.
for el in ["Li", "Fe", "O2"]:
entries = [e for e in self.entries if e.composition.reduced_formula == el]
pd = PhaseDiagram(entries)
self.assertEqual(len(pd.stable_entries), 1)
for e in entries:
decomp, ehull = pd.get_decomp_and_e_above_hull(e)
self.assertGreaterEqual(ehull, 0)
plotter = PDPlotter(pd)
lines, stable_entries, unstable_entries = plotter.pd_plot_data
self.assertEqual(lines[0][1], [0, 0])
def test_ordering(self):
# Test sorting of elements
entries = [ComputedEntry(Composition(formula), 0) for formula in ["O", "N", "Fe"]]
pd = PhaseDiagram(entries)
sorted_elements = (Element("Fe"), Element("N"), Element("O"))
self.assertEqual(tuple(pd.elements), sorted_elements)
entries.reverse()
pd = PhaseDiagram(entries)
self.assertEqual(tuple(pd.elements), sorted_elements)
# Test manual specification of order
ordering = [Element(elt_string) for elt_string in ["O", "N", "Fe"]]
pd = PhaseDiagram(entries, elements=ordering)
self.assertEqual(tuple(pd.elements), tuple(ordering))
def test_stable_entries(self):
stable_formulas = [ent.composition.reduced_formula for ent in self.pd.stable_entries]
expected_stable = [
"Fe2O3",
"Li5FeO4",
"LiFeO2",
"Fe3O4",
"Li",
"Fe",
"Li2O",
"O2",
"FeO",
]
for formula in expected_stable:
self.assertTrue(formula in stable_formulas, formula + " not in stable entries!")
def test_get_formation_energy(self):
stable_formation_energies = {
ent.composition.reduced_formula: self.pd.get_form_energy(ent) for ent in self.pd.stable_entries
}
expected_formation_energies = {
"Li5FeO4": -164.8117344866667,
"Li2O2": -14.119232793333332,
"Fe2O3": -16.574164339999996,
"FeO": -5.7141519966666685,
"Li": 0.0,
"LiFeO2": -7.732752316666666,
"Li2O": -6.229303868333332,
"Fe": 0.0,
"Fe3O4": -22.565714456666683,
"Li2FeO3": -45.67166036000002,
"O2": 0.0,
}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(energy, stable_formation_energies[formula], 7)
def test_all_entries_hulldata(self):
self.assertEqual(len(self.pd.all_entries_hulldata), 490)
def test_planar_inputs(self):
e1 = PDEntry("H", 0)
e2 = PDEntry("He", 0)
e3 = PDEntry("Li", 0)
e4 = PDEntry("Be", 0)
e5 = PDEntry("B", 0)
e6 = PDEntry("Rb", 0)
pd = PhaseDiagram([e1, e2, e3, e4, e5, e6], map(Element, ["Rb", "He", "B", "Be", "Li", "H"]))
self.assertEqual(len(pd.facets), 1)
def test_str(self):
self.assertIsNotNone(str(self.pd))
def test_get_e_above_hull(self):
for entry in self.pd.stable_entries:
self.assertLess(
self.pd.get_e_above_hull(entry),
1e-11,
"Stable entries should have e above hull of zero!",
)
for entry in self.pd.all_entries:
if entry not in self.pd.stable_entries:
e_ah = self.pd.get_e_above_hull(entry)
self.assertTrue(isinstance(e_ah, Number))
self.assertGreaterEqual(e_ah, 0)
def test_get_equilibrium_reaction_energy(self):
for entry in self.pd.stable_entries:
self.assertLessEqual(
self.pd.get_equilibrium_reaction_energy(entry),
0,
"Stable entries should have negative equilibrium reaction energy!",
)
def test_get_phase_separation_energy(self):
for entry in self.pd.unstable_entries:
if entry.composition.fractional_composition not in [
e.composition.fractional_composition for e in self.pd.stable_entries
]:
self.assertGreaterEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Unstable entries should have positive decomposition energy!",
)
else:
if entry.is_element:
el_ref = self.pd.el_refs[entry.composition.elements[0]]
e_d = entry.energy_per_atom - el_ref.energy_per_atom
self.assertAlmostEqual(self.pd.get_phase_separation_energy(entry), e_d, 7)
# NOTE the remaining materials would require explicit tests as they
# could be either positive or negative
pass
for entry in self.pd.stable_entries:
if entry.composition.is_element:
self.assertEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Stable elemental entries should have decomposition energy of zero!",
)
else:
self.assertLessEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Stable entries should have negative decomposition energy!",
)
self.assertAlmostEqual(
self.pd.get_phase_separation_energy(entry, stable_only=True),
self.pd.get_equilibrium_reaction_energy(entry),
7,
(
"Using `stable_only=True` should give decomposition energy equal to "
"equilibrium reaction energy!"
),
)
# Test that we get correct behaviour with a polymorph
toy_entries = {
"Li": 0.0,
"Li2O": -5,
"LiO2": -4,
"O2": 0.0,
}
toy_pd = PhaseDiagram([PDEntry(c, e) for c, e in toy_entries.items()])
# stable entry
self.assertAlmostEqual(
toy_pd.get_phase_separation_energy(PDEntry("Li2O", -5)),
-1.0,
7,
)
# polymorph
self.assertAlmostEqual(
toy_pd.get_phase_separation_energy(PDEntry("Li2O", -4)),
-2.0 / 3.0,
7,
)
# Test that the method works for novel entries
novel_stable_entry = PDEntry("Li5FeO4", -999)
self.assertLess(
self.pd.get_phase_separation_energy(novel_stable_entry),
0,
"Novel stable entries should have negative decomposition energy!",
)
novel_unstable_entry = PDEntry("Li5FeO4", 999)
self.assertGreater(
self.pd.get_phase_separation_energy(novel_unstable_entry),
0,
"Novel unstable entries should have positive decomposition energy!",
)
duplicate_entry = PDEntry("Li2O", -14.31361175)
scaled_dup_entry = PDEntry("Li4O2", -14.31361175 * 2)
stable_entry = [e for e in self.pd.stable_entries if e.name == "Li2O"][0]
self.assertEqual(
self.pd.get_phase_separation_energy(duplicate_entry),
self.pd.get_phase_separation_energy(stable_entry),
"Novel duplicates of stable entries should have same decomposition energy!",
)
self.assertEqual(
self.pd.get_phase_separation_energy(scaled_dup_entry),
self.pd.get_phase_separation_energy(stable_entry),
"Novel scaled duplicates of stable entries should have same decomposition energy!",
)
def test_get_decomposition(self):
for entry in self.pd.stable_entries:
self.assertEqual(
len(self.pd.get_decomposition(entry.composition)),
1,
"Stable composition should have only 1 decomposition!",
)
dim = len(self.pd.elements)
for entry in self.pd.all_entries:
ndecomp = len(self.pd.get_decomposition(entry.composition))
self.assertTrue(
ndecomp > 0 and ndecomp <= dim,
"The number of decomposition phases can at most be equal to the number of components.",
)
        # Just to test decomp for a fictitious composition
ansdict = {
entry.composition.formula: amt for entry, amt in self.pd.get_decomposition(Composition("Li3Fe7O11")).items()
}
expected_ans = {
"Fe2 O2": 0.0952380952380949,
"Li1 Fe1 O2": 0.5714285714285714,
"Fe6 O8": 0.33333333333333393,
}
for k, v in expected_ans.items():
self.assertAlmostEqual(ansdict[k], v, 7)
def test_get_transition_chempots(self):
for el in self.pd.elements:
self.assertLessEqual(len(self.pd.get_transition_chempots(el)), len(self.pd.facets))
def test_get_element_profile(self):
for el in self.pd.elements:
for entry in self.pd.stable_entries:
if not (entry.composition.is_element):
self.assertLessEqual(
len(self.pd.get_element_profile(el, entry.composition)),
len(self.pd.facets),
)
expected = [
{
"evolution": 1.0,
"chempot": -4.2582781416666666,
"reaction": "Li2O + 0.5 O2 -> Li2O2",
},
{
"evolution": 0,
"chempot": -5.0885906699999968,
"reaction": "Li2O -> Li2O",
},
{
"evolution": -1.0,
"chempot": -10.487582010000001,
"reaction": "Li2O -> 2 Li + 0.5 O2",
},
]
result = self.pd.get_element_profile(Element("O"), Composition("Li2O"))
for d1, d2 in zip(expected, result):
self.assertAlmostEqual(d1["evolution"], d2["evolution"])
self.assertAlmostEqual(d1["chempot"], d2["chempot"])
self.assertEqual(d1["reaction"], str(d2["reaction"]))
def test_get_get_chempot_range_map(self):
elements = [el for el in self.pd.elements if el.symbol != "Fe"]
self.assertEqual(len(self.pd.get_chempot_range_map(elements)), 10)
def test_getmu_vertices_stability_phase(self):
results = self.pd.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(len(results), 6)
test_equality = False
for c in results:
if (
abs(c[Element("O")] + 7.115) < 1e-2
and abs(c[Element("Fe")] + 6.596) < 1e-2
and abs(c[Element("Li")] + 3.931) < 1e-2
):
test_equality = True
self.assertTrue(test_equality, "there is an expected vertex missing in the list")
def test_getmu_range_stability_phase(self):
results = self.pd.get_chempot_range_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)
def test_get_hull_energy(self):
for entry in self.pd.stable_entries:
h_e = self.pd.get_hull_energy(entry.composition)
self.assertAlmostEqual(h_e, entry.energy)
n_h_e = self.pd.get_hull_energy(entry.composition.fractional_composition)
self.assertAlmostEqual(n_h_e, entry.energy_per_atom)
def test_1d_pd(self):
entry = PDEntry("H", 0)
pd = PhaseDiagram([entry])
decomp, e = pd.get_decomp_and_e_above_hull(PDEntry("H", 1))
self.assertAlmostEqual(e, 1)
self.assertAlmostEqual(decomp[entry], 1.0)
def test_get_critical_compositions_fractional(self):
c1 = Composition("Fe2O3").fractional_composition
c2 = Composition("Li3FeO4").fractional_composition
c3 = Composition("Li2O").fractional_composition
comps = self.pd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3").fractional_composition,
Composition("Li0.3243244Fe0.1621621O0.51351349"),
Composition("Li3FeO4").fractional_composition,
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [
Composition("Fe0.4O0.6"),
Composition("LiFeO2").fractional_composition,
Composition("Li5FeO4").fractional_composition,
Composition("Li2O").fractional_composition,
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
def test_get_critical_compositions(self):
c1 = Composition("Fe2O3")
c2 = Composition("Li3FeO4")
c3 = Composition("Li2O")
comps = self.pd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3"),
Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
Composition("Li3FeO4"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [
Composition("Fe2O3"),
Composition("LiFeO2"),
Composition("Li5FeO4") / 3,
Composition("Li2O"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# Don't fail silently if input compositions aren't in phase diagram
# Can be very confusing if you're working with a GrandPotentialPD
self.assertRaises(
ValueError,
self.pd.get_critical_compositions,
Composition("Xe"),
Composition("Mn"),
)
# For the moment, should also fail even if compositions are in the gppd
# because it isn't handled properly
gppd = GrandPotentialPhaseDiagram(self.pd.all_entries, {"Xe": 1}, self.pd.elements + [Element("Xe")])
self.assertRaises(
ValueError,
gppd.get_critical_compositions,
Composition("Fe2O3"),
Composition("Li3FeO4Xe"),
)
# check that the function still works though
comps = gppd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3"),
Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
Composition("Li3FeO4"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# case where the endpoints are identical
self.assertEqual(self.pd.get_critical_compositions(c1, c1 * 2), [c1, c1 * 2])
def test_get_composition_chempots(self):
c1 = Composition("Fe3.1O4")
c2 = Composition("Fe3.2O4.1Li0.01")
e1 = self.pd.get_hull_energy(c1)
e2 = self.pd.get_hull_energy(c2)
cp = self.pd.get_composition_chempots(c1)
calc_e2 = e1 + sum(cp[k] * v for k, v in (c2 - c1).items())
self.assertAlmostEqual(e2, calc_e2)
def test_get_all_chempots(self):
c1 = Composition("Fe3.1O4")
c2 = Composition("FeO")
cp1 = self.pd.get_all_chempots(c1)
cpresult = {
Element("Li"): -4.077061954999998,
Element("Fe"): -6.741593864999999,
Element("O"): -6.969907375000003,
}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp1["Fe3O4-FeO-LiFeO2"][elem], energy)
cp2 = self.pd.get_all_chempots(c2)
cpresult = {
Element("O"): -7.115354140000001,
Element("Fe"): -6.5961471,
Element("Li"): -3.9316151899999987,
}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp2["FeO-LiFeO2-Fe"][elem], energy)
def test_to_from_dict(self):
# test round-trip for other entry types such as ComputedEntry
entry = ComputedEntry("H", 0.0, 0.0, entry_id="test")
pd = PhaseDiagram([entry])
d = pd.as_dict()
pd_roundtrip = PhaseDiagram.from_dict(d)
self.assertEqual(pd.all_entries[0].entry_id, pd_roundtrip.all_entries[0].entry_id)
class GrandPotentialPhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -5})
self.pd6 = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -6})
def test_stable_entries(self):
stable_formulas = [ent.original_entry.composition.reduced_formula for ent in self.pd.stable_entries]
expected_stable = ["Li5FeO4", "Li2FeO3", "LiFeO2", "Fe2O3", "Li2O2"]
for formula in expected_stable:
self.assertTrue(formula in stable_formulas, "{} not in stable entries!".format(formula))
self.assertEqual(len(self.pd6.stable_entries), 4)
def test_get_formation_energy(self):
stable_formation_energies = {
ent.original_entry.composition.reduced_formula: self.pd.get_form_energy(ent)
for ent in self.pd.stable_entries
}
expected_formation_energies = {
"Fe2O3": 0.0,
"Li5FeO4": -5.305515040000046,
"Li2FeO3": -2.3424741500000152,
"LiFeO2": -0.43026396250000154,
"Li2O2": 0.0,
}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(
energy,
stable_formation_energies[formula],
7,
"Calculated formation for {} is not correct!".format(formula),
)
def test_str(self):
self.assertIsNotNone(str(self.pd))
class BasePhaseDiagramTest(PhaseDiagramTest):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = BasePhaseDiagram.from_entries(self.entries)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
pass
def test_as_dict_from_dict(self):
dd = self.pd.as_dict()
new_pd = BasePhaseDiagram.from_dict(dd)
new_dd = new_pd.as_dict()
self.assertEqual(new_dd, dd)
class CompoundPhaseDiagramTest(unittest.TestCase):
def setUp(self):
self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
self.pd = CompoundPhaseDiagram(self.entries, [Composition("Li2O"), Composition("Fe2O3")])
def test_stable_entries(self):
stable_formulas = [ent.name for ent in self.pd.stable_entries]
expected_stable = ["Fe2O3", "Li5FeO4", "LiFeO2", "Li2O"]
for formula in expected_stable:
self.assertTrue(formula in stable_formulas)
def test_get_formation_energy(self):
stable_formation_energies = {ent.name: self.pd.get_form_energy(ent) for ent in self.pd.stable_entries}
expected_formation_energies = {
"Li5FeO4": -7.0773284399999739,
"Fe2O3": 0,
"LiFeO2": -0.47455929750000081,
"Li2O": 0,
}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(energy, stable_formation_energies[formula], 7)
def test_str(self):
self.assertIsNotNone(str(self.pd))
class ReactionDiagramTest(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
self.entries = list(EntrySet.from_csv(os.path.join(module_dir, "reaction_entries_test.csv")).entries)
for e in self.entries:
if e.composition.reduced_formula == "VPO5":
entry1 = e
elif e.composition.reduced_formula == "H4(CO)3":
entry2 = e
self.rd = ReactionDiagram(entry1=entry1, entry2=entry2, all_entries=self.entries[2:])
def test_get_compound_pd(self):
self.rd.get_compound_pd()
def test_formula(self):
for e in self.rd.rxn_entries:
self.assertIn(Element.V, e.composition)
self.assertIn(Element.O, e.composition)
self.assertIn(Element.C, e.composition)
self.assertIn(Element.P, e.composition)
self.assertIn(Element.H, e.composition)
# formed_formula = [e.composition.reduced_formula for e in
# self.rd.rxn_entries]
# expected_formula = [
# 'V0.12707182P0.12707182H0.0441989C0.03314917O0.66850829',
# 'V0.125P0.125H0.05C0.0375O0.6625',
# 'V0.12230216P0.12230216H0.05755396C0.04316547O0.65467626',
# 'V0.11340206P0.11340206H0.08247423C0.06185567O0.62886598',
# 'V0.11267606P0.11267606H0.08450704C0.06338028O0.62676056',
# 'V0.11229947P0.11229947H0.0855615C0.06417112O0.62566845',
# 'V0.09677419P0.09677419H0.12903226C0.09677419O0.58064516',
# 'V0.05882353P0.05882353H0.23529412C0.17647059O0.47058824',
# 'V0.04225352P0.04225352H0.28169014C0.21126761O0.42253521']
#
# for formula in expected_formula:
# self.assertTrue(formula in formed_formula, "%s not in %s" % (formed_formula, expected_formula))
class PDPlotterTest(unittest.TestCase):
def setUp(self):
entries = list(EntrySet.from_csv(os.path.join(module_dir, "pdentries_test.csv")))
self.pd_ternary = PhaseDiagram(entries)
self.plotter_ternary_mpl = PDPlotter(self.pd_ternary, backend="matplotlib")
self.plotter_ternary_plotly = PDPlotter(self.pd_ternary, backend="plotly")
entrieslio = [e for e in entries if "Fe" not in e.composition]
self.pd_binary = PhaseDiagram(entrieslio)
self.plotter_binary_mpl = PDPlotter(self.pd_binary, backend="matplotlib")
self.plotter_binary_plotly = PDPlotter(self.pd_binary, backend="plotly")
entries.append(PDEntry("C", 0))
self.pd_quaternary = PhaseDiagram(entries)
self.plotter_quaternary_mpl = PDPlotter(self.pd_quaternary, backend="matplotlib")
self.plotter_quaternary_plotly = PDPlotter(self.pd_quaternary, backend="plotly")
def test_pd_plot_data(self):
(lines, labels, unstable_entries) = self.plotter_ternary_mpl.pd_plot_data
self.assertEqual(len(lines), 22)
self.assertEqual(
len(labels),
len(self.pd_ternary.stable_entries),
"Incorrect number of lines generated!",
)
self.assertEqual(
len(unstable_entries),
len(self.pd_ternary.all_entries) - len(self.pd_ternary.stable_entries),
"Incorrect number of lines generated!",
)
(lines, labels, unstable_entries) = self.plotter_quaternary_mpl.pd_plot_data
self.assertEqual(len(lines), 33)
self.assertEqual(len(labels), len(self.pd_quaternary.stable_entries))
self.assertEqual(
len(unstable_entries),
len(self.pd_quaternary.all_entries) - len(self.pd_quaternary.stable_entries),
)
(lines, labels, unstable_entries) = self.plotter_binary_mpl.pd_plot_data
self.assertEqual(len(lines), 3)
self.assertEqual(len(labels), len(self.pd_binary.stable_entries))
def test_mpl_plots(self):
# Some very basic ("non")-tests. Just to make sure the methods are callable.
self.plotter_binary_mpl.get_plot().close()
self.plotter_ternary_mpl.get_plot().close()
self.plotter_quaternary_mpl.get_plot().close()
self.plotter_ternary_mpl.get_contour_pd_plot().close()
self.plotter_ternary_mpl.get_chempot_range_map_plot([Element("Li"), Element("O")]).close()
self.plotter_ternary_mpl.plot_element_profile(Element("O"), Composition("Li2O")).close()
def test_plotly_plots(self):
# Also very basic tests. Ensures callability and 2D vs 3D properties.
self.plotter_binary_plotly.get_plot()
self.plotter_ternary_plotly.get_plot()
self.plotter_quaternary_plotly.get_plot()
class UtilityFunctionTest(unittest.TestCase):
def test_unique_lines(self):
testdata = [
[5, 53, 353],
[399, 20, 52],
[399, 400, 20],
[13, 399, 52],
[21, 400, 353],
[393, 5, 353],
[400, 393, 353],
[393, 400, 399],
[393, 13, 5],
[13, 393, 399],
[400, 17, 20],
[21, 17, 400],
]
expected_ans = {
(5, 393),
(21, 353),
(353, 400),
(5, 13),
(17, 20),
(21, 400),
(17, 400),
(52, 399),
(393, 399),
(20, 52),
(353, 393),
(5, 353),
(5, 53),
(13, 399),
(393, 400),
(13, 52),
(53, 353),
(17, 21),
(13, 393),
(20, 399),
(399, 400),
(20, 400),
}
self.assertEqual(uniquelines(testdata), expected_ans)
def test_triangular_coord(self):
coord = [0.5, 0.5]
coord = triangular_coord(coord)
self.assertTrue(np.allclose(coord, [0.75, 0.4330127]))
def test_tet_coord(self):
coord = [0.5, 0.5, 0.5]
coord = tet_coord(coord)
self.assertTrue(np.allclose(coord, [1.0, 0.57735027, 0.40824829]))
if __name__ == "__main__":
unittest.main()
| mit |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/GoogleTraining/workshop_sections/extras/intro_word2vec/word2vec_basic_summaries.py | 4 | 10120 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import time
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = f.read(f.namelist()[0]).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Define info to be used by the SummaryWriter. This will let TensorBoard
# plot loss values during the training process.
loss_summary = tf.scalar_summary("loss", loss)
train_summary_op = tf.merge_summary([loss_summary])
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
# Directory in which to write summary information.
# You can point TensorBoard to this directory via:
# $ tensorboard --logdir=/tmp/word2vec_basic/summaries
# Tensorflow assumes this directory already exists, so we need to create it.
timestamp = str(int(time.time()))
if not os.path.exists(os.path.join("/tmp/word2vec_basic",
"summaries", timestamp)):
os.makedirs(os.path.join("/tmp/word2vec_basic", "summaries", timestamp))
# Create the SummaryWriter
train_summary_writer = tf.train.SummaryWriter(
os.path.join(
"/tmp/word2vec_basic", "summaries", timestamp), session.graph)
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also evaluate the training summary op.
_, loss_val, tsummary = session.run(
[optimizer, loss, train_summary_op],
feed_dict=feed_dict)
average_loss += loss_val
# Write the evaluated summary info to the SummaryWriter. This info will
# then show up in the TensorBoard events.
train_summary_writer.add_summary(tsummary, step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
dhruv13J/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
kandluis/crime-prediction | predictor/sf_gp.py | 1 | 2178 | '''
Script to process the San Franscisco data. Currently, parameters must be changed
directly at the script level!
To run the script, use the following command from the parent directory (ie.
make sure you're in the crime-predictions directory)
python -m predictor.sf_gp
Authors:
Alex Wang ([email protected])
Luis Perez ([email protected])
Copyright 2015, Harvard University
'''
import numpy as np
import pandas as pd
import re
import warnings
import os
import pickle
from . import GP
''' Global Variables '''
sfdata_file = os.path.abspath(
'../cs281_data/large_data/sfclean.pk') # Location of data
buckets = 10 # Number of buckets.
# Square Exponential Kernel Parameters
# These are the optimal parameters for n = 10 (we can try with other
# values too)
# BAYESIAN
# l = [9.164520, 0.296120, 10.153288]
# horz = 33.522111
# GPy
l = [0.82754075018, 0.82754075018, 0.82754075018]
horz = 9620.11949755
# This is a function that takes as input the training data results.
# BAYESIAN
# sig_eps_f = lambda train_t: 105.693084
# GPy
sig_eps_f = lambda train_t: train_t.std()
logTransform = False  # Should we fit the GP in log space?
# Prefix to use for plots created in bos directory.
file_prefix = 'GPSEOptimizedGPyTrain'
def createDataMatrix(data):
'''
    Transforms a pandas DataFrame into a matrix with one row per crime record and
    columns [time period, latitude, longitude].
'''
X = np.zeros((len(data), 3))
X[:, 1] = data.Latitude.values.astype(float)
X[:, 2] = data.Longitude.values.astype(float)
X[:, 0] = data.TimeFeature.values.astype(int)
return X
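# Illustrative sketch (not part of the original pipeline): the cleaned crime DataFrame is
# assumed to carry 'TimeFeature', 'Latitude' and 'Longitude' columns, which
# createDataMatrix lays out as one [time, latitude, longitude] row per crime record.
def _example_create_data_matrix():
    demo = pd.DataFrame({'TimeFeature': [0, 1],
                         'Latitude': [37.77, 37.78],
                         'Longitude': [-122.42, -122.41]})
    # -> array([[  0.  ,  37.77, -122.42],
    #           [  1.  ,  37.78, -122.41]])
    return createDataMatrix(demo)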
def read_data(sfdata_file):
''' Read in data '''
# Let's make a plot for some values of N to see if the data works out...
with open(sfdata_file) as fp:
data = pickle.load(fp)
# For sfdata, need to remove outliers
data = data[-120 > data.Longitude][data.Longitude > (-130)]
data = data[data.Latitude > 37][data.Latitude < 40]
return (createDataMatrix(data))
print "Finished processing San Franscisco data..."
GP.run_gp(read_data(sfdata_file), buckets, l, horz, sig_eps_f, logTransform,
file_prefix, 'sf')
| gpl-2.0 |
kevin-intel/scikit-learn | sklearn/svm/tests/test_svm.py | 1 | 46994 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.multiclass import OneVsRestClassifier
# mypy error: Module 'sklearn.svm' has no attribute '_libsvm'
from sklearn.svm import _libsvm # type: ignore
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert np.mean(clf.predict(iris.data) == iris.target) > 0.9
assert hasattr(clf, "coef_") == (k == 'linear')
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = _libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = _libsvm.predict(iris.data, *model)
assert np.mean(pred == iris.target) > .95
model = _libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = _libsvm.predict(iris.data, *model, kernel='linear')
assert np.mean(pred == iris.target) > .95
pred = _libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert np.mean(pred == iris.target) > .95
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = _libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
with pytest.raises(ValueError):
clf.predict(KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
def kfunc(x, y):
return np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(np.array(X), Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.)):
clf.fit(diabetes.data, diabetes.target)
assert clf.score(diabetes.data, diabetes.target) > 0.02
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
with pytest.raises(ValueError):
clf.predict(X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [1, -1, -1])
assert pred.dtype == np.dtype('intp')
assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.750, 0.750, 0.750, 0.750]],
decimal=3)
with pytest.raises(AttributeError):
(lambda: clf.coef_)()
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert np.mean(y_pred_test == 1) > .9
y_pred_outliers = clf.predict(X_outliers)
assert np.mean(y_pred_outliers == -1) > .9
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_oneclass_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf = svm.OneClassSVM(gamma=1).fit(X_train)
assert_array_equal(clf.score_samples([[2., 2.]]),
clf.decision_function([[2., 2.]]) + clf.offset_)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
@pytest.mark.parametrize('SVM', (svm.SVC, svm.NuSVC))
def test_decision_function_shape(SVM):
# check that decision_function_shape='ovr' or 'ovo' gives
# correct shape and is consistent with predict
clf = SVM(kernel='linear',
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert dec.shape == (len(iris.data), 3)
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = SVM(kernel='linear',
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert dec.shape == (len(X_test), 5)
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape when decision_function_shape='ovo'
clf = SVM(kernel='linear',
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert dec.shape == (len(X_train), 10)
with pytest.raises(ValueError, match="must be either 'ovr' or 'ovo'"):
SVM(decision_function_shape='bad').fit(X_train, y_train)
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert f1_score(y_[100:], y_pred) > .3
@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()])
def test_svm_classifier_sided_sample_weight(estimator):
# fit a linear SVM and check that giving more weight to opposed samples
# in the space will flip the decision toward these samples.
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel='linear')
# check that with unit weights, a sample is supposed to be predicted on
# the boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1., 1.]])
assert y_pred == pytest.approx(0)
# give more weights to opposed samples
sample_weight = [10., .1, .1, .1, .1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1., 1.]])
assert y_pred < 0
sample_weight = [1., .1, 10., 10., .1, .1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1., 1.]])
assert y_pred > 0
@pytest.mark.parametrize(
"estimator",
[svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)]
)
def test_svm_regressor_sided_sample_weight(estimator):
# similar test to test_svm_classifier_sided_sample_weight but for
# SVM regressors
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel='linear')
# check that with unit weights, a sample is supposed to be predicted on
# the boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1., 1.]])
assert y_pred == pytest.approx(1.5)
# give more weights to opposed samples
sample_weight = [10., .1, .1, .1, .1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1., 1.]])
assert y_pred < 1.5
sample_weight = [1., .1, 10., 10., .1, .1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1., 1.]])
assert y_pred > 1.5
def test_svm_equivalence_sample_weight_C():
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_allclose(dual_coef_no_weight, clf.dual_coef_)
@pytest.mark.parametrize(
"Estimator, err_msg",
[(svm.SVC,
'Invalid input - all samples have zero or negative weights.'),
(svm.NuSVC, '(negative dimensions are not allowed|nu is infeasible)'),
(svm.SVR,
'Invalid input - all samples have zero or negative weights.'),
(svm.NuSVR,
'Invalid input - all samples have zero or negative weights.'),
(svm.OneClassSVM,
'Invalid input - all samples have zero or negative weights.')
],
ids=['SVC', 'NuSVC', 'SVR', 'NuSVR', 'OneClassSVM']
)
@pytest.mark.parametrize(
"sample_weight",
[[0] * len(Y), [-0.3] * len(Y)],
ids=['weights-are-zero', 'weights-are-negative']
)
def test_negative_sample_weights_mask_all_samples(Estimator,
err_msg, sample_weight):
est = Estimator(kernel='linear')
with pytest.raises(ValueError, match=err_msg):
est.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, err_msg",
[(svm.SVC,
'Invalid input - all samples with positive weights have the same label'),
(svm.NuSVC, 'specified nu is infeasible')],
ids=['SVC', 'NuSVC']
)
@pytest.mark.parametrize(
"sample_weight",
[[0, -0.5, 0, 1, 1, 1],
[1, 1, 1, 0, -0.1, -0.3]],
ids=['mask-label-1', 'mask-label-2']
)
def test_negative_weights_svc_leave_just_one_label(Classifier,
err_msg,
sample_weight):
clf = Classifier(kernel='linear')
with pytest.raises(ValueError, match=err_msg):
clf.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, model",
[(svm.SVC, {'when-left': [0.3998, 0.4], 'when-right': [0.4, 0.3999]}),
(svm.NuSVC, {'when-left': [0.3333, 0.3333],
'when-right': [0.3333, 0.3333]})],
ids=['SVC', 'NuSVC']
)
@pytest.mark.parametrize(
"sample_weight, mask_side",
[([1, -0.5, 1, 1, 1, 1], 'when-left'),
([1, 1, 1, 0, 1, 1], 'when-right')],
ids=['partial-mask-label-1', 'partial-mask-label-2']
)
def test_negative_weights_svc_leave_two_labels(Classifier, model,
sample_weight, mask_side):
clf = Classifier(kernel='linear')
clf.fit(X, Y, sample_weight=sample_weight)
assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)
@pytest.mark.parametrize(
"Estimator",
[svm.SVC, svm.NuSVC, svm.NuSVR],
ids=['SVC', 'NuSVC', 'NuSVR']
)
@pytest.mark.parametrize(
"sample_weight",
[[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
ids=['partial-mask-label-1', 'partial-mask-label-2']
)
def test_negative_weight_equal_coeffs(Estimator, sample_weight):
# model generates equal coefficients
est = Estimator(kernel='linear')
est.fit(X, Y, sample_weight=sample_weight)
coef = np.abs(est.coef_).ravel()
assert coef[0] == pytest.approx(coef[1], rel=1e-3)
@ignore_warnings(category=UndefinedMetricWarning)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from one class.
    # We add one to the targets as a non-regression test:
    # class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes=classes,
y=y[unbalanced])
assert np.argmax(class_weights) == 2
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert (metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
with pytest.raises(ValueError):
svm.SVC(C=-1).fit(X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
with pytest.raises(ValueError):
clf.fit(X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
with pytest.raises(ValueError):
clf.fit(X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert not Xf.flags['C_CONTIGUOUS']
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert not yf.flags['F_CONTIGUOUS']
assert not yf.flags['C_CONTIGUOUS']
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
with pytest.raises(ValueError):
clf.fit(X, Y)
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
with pytest.raises(ValueError):
clf.predict(sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
with pytest.raises(ValueError):
clf.predict(X)
clf = svm.SVC()
clf.fit(X, Y)
with pytest.raises(ValueError):
clf.predict(Xt)
@pytest.mark.parametrize(
'Estimator, data',
[(svm.SVC, datasets.load_iris(return_X_y=True)),
(svm.NuSVC, datasets.load_iris(return_X_y=True)),
(svm.SVR, datasets.load_diabetes(return_X_y=True)),
(svm.NuSVR, datasets.load_diabetes(return_X_y=True)),
(svm.OneClassSVM, datasets.load_iris(return_X_y=True))]
)
def test_svm_gamma_error(Estimator, data):
X, y = data
est = Estimator(gamma='auto_deprecated')
err_msg = "When 'gamma' is a string, it should be either 'scale' or 'auto'"
with pytest.raises(ValueError, match=err_msg):
est.fit(X, y)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError
clf = svm.SVC(kernel='linear', probability=True)
clf.fit(X, Y)
clf.predict_proba(T)
_libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
with pytest.raises(TypeError, match="Sparse precomputed"):
clf.fit(sparse_gram, [0, 1])
def test_sparse_fit_support_vectors_empty():
# Regression test for #14893
X_train = sparse.csr_matrix([[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 1]])
y_train = np.array([0.04, 0.04, 0.10, 0.16])
model = svm.SVR(kernel='linear')
model.fit(X_train, y_train)
assert not model.support_vectors_.data.size
assert not model.dual_coef_.data.size
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
with pytest.raises(ValueError, match="Unsupported set of "
"arguments.*penalty='%s.*loss='%s.*dual=%s"
% (penalty, loss, dual)):
clf.fit(X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
with pytest.raises(ValueError, match=".*loss='l3' is not supported.*"):
svm.LinearSVC(loss="l3").fit(X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check that uppercase loss/penalty notation raises an error in
    # _fit_liblinear, which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = "loss='SQuared_hinge' is not supported"
with pytest.raises(ValueError, match=msg):
svm.LinearSVC(loss="SQuared_hinge").fit(X, y)
msg = (
"The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"
)
with pytest.raises(ValueError, match=msg):
svm.LinearSVC(penalty="L2").fit(X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert clf.fit_intercept
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert (ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9
# classifiers shouldn't be the same
assert (ovr_clf.coef_ != cs_clf.coef_).all()
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert acc > 0.9
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert set(clf.classes_) == set(iris.target_names)
assert np.mean(clf.predict(iris.data) == target) > 0.8
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert clf.intercept_scaling == 1, clf.intercept_scaling
assert clf.fit_intercept
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert intercept1 < -1
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
with pytest.raises(AttributeError):
clf.__setattr__('coef_', np.arange(3))
with pytest.raises((RuntimeError, ValueError)):
clf.coef_.__setitem__((0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
with pytest.raises(ValueError):
svc.fit(X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
warning_msg = (
r'Solver terminated early \(max_iter=1\). Consider pre-processing '
r'your data with StandardScaler or MinMaxScaler.'
)
with pytest.warns(ConvergenceWarning, match=warning_msg):
a.fit(np.array(X), Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"):
clf.predict(X)
clf = svm.NuSVR()
with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"):
clf.predict(X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svm_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(random_state=0, max_iter=2)
warning_msg = (
"Liblinear failed to converge, increase the number of iterations."
)
with pytest.warns(ConvergenceWarning, match=warning_msg):
lsvc.fit(X, Y)
assert lsvc.n_iter_ == 2
lsvr = svm.LinearSVR(random_state=0, max_iter=2)
with pytest.warns(ConvergenceWarning, match=warning_msg):
lsvr.fit(iris.data, iris.target)
assert lsvr.n_iter_ == 2
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(
svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_
)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
with pytest.raises(ValueError, match=msg):
lsvc.fit(X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert lsvc.intercept_ == 0.
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert hasattr(G, 'predict_proba')
G.fit(iris.data, iris.target)
assert hasattr(G, 'predict_proba')
G = svm.SVC(probability=False)
assert not hasattr(G, 'predict_proba')
G.fit(iris.data, iris.target)
assert not hasattr(G, 'predict_proba')
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert hasattr(G, 'predict_proba')
msg = "predict_proba is not available when fitted with probability=False"
with pytest.raises(NotFittedError, match=msg):
G.predict_proba(iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(
estimator(decision_function_shape="ovr")).fit(X, y)
assert len(clf.predict(X)) == len(y)
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack((
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1] # Q4
))
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert np.min(pred_class_deci_val) > 0.0
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])
@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_invalid_break_ties_param(SVCClass):
X, y = make_blobs(random_state=42)
svm = SVCClass(kernel="linear", decision_function_shape='ovo',
break_ties=True, random_state=42).fit(X, y)
with pytest.raises(ValueError, match="break_ties must be False"):
svm.predict(y)
@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_ovr_tie_breaking(SVCClass):
"""Test if predict breaks ties in OVR mode.
Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
"""
X, y = make_blobs(random_state=27)
xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 1000)
ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 1000)
xx, yy = np.meshgrid(xs, ys)
svm = SVCClass(kernel="linear", decision_function_shape='ovr',
break_ties=False, random_state=42).fit(X, y)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
assert not np.all(pred == np.argmax(dv, axis=1))
svm = SVCClass(kernel="linear", decision_function_shape='ovr',
break_ties=True, random_state=42).fit(X, y)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
assert np.all(pred == np.argmax(dv, axis=1))
def test_gamma_auto():
X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1]
with pytest.warns(None) as record:
svm.SVC(kernel='linear').fit(X, y)
assert not len(record)
with pytest.warns(None) as record:
svm.SVC(kernel='precomputed').fit(X, y)
assert not len(record)
def test_gamma_scale():
X, y = [[0.], [1.]], [0, 1]
clf = svm.SVC()
with pytest.warns(None) as record:
clf.fit(X, y)
assert not len(record)
assert_almost_equal(clf._gamma, 4)
    # X_var ~= 1 shouldn't raise a warning when gamma is not explicitly set.
X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1]
with pytest.warns(None) as record:
clf.fit(X, y)
assert not len(record)
@pytest.mark.parametrize(
"SVM, params",
[(LinearSVC, {'penalty': 'l1', 'loss': 'squared_hinge', 'dual': False}),
(LinearSVC, {'penalty': 'l2', 'loss': 'squared_hinge', 'dual': True}),
(LinearSVC, {'penalty': 'l2', 'loss': 'squared_hinge', 'dual': False}),
(LinearSVC, {'penalty': 'l2', 'loss': 'hinge', 'dual': True}),
(LinearSVR, {'loss': 'epsilon_insensitive', 'dual': True}),
(LinearSVR, {'loss': 'squared_epsilon_insensitive', 'dual': True}),
(LinearSVR, {'loss': 'squared_epsilon_insensitive', 'dual': True})]
)
def test_linearsvm_liblinear_sample_weight(SVM, params):
X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
[2, 1], [2, 1], [2, 1], [2, 1],
[3, 3], [3, 3], [3, 3], [3, 3],
[4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.dtype('float'))
y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype('int'))
X2 = np.vstack([X, X])
y2 = np.hstack([y, 3 - y])
sample_weight = np.ones(shape=len(y) * 2)
sample_weight[len(y):] = 0
X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0)
base_estimator = SVM(random_state=42)
base_estimator.set_params(**params)
base_estimator.set_params(tol=1e-12, max_iter=1000)
est_no_weight = base.clone(base_estimator).fit(X, y)
est_with_weight = base.clone(base_estimator).fit(
X2, y2, sample_weight=sample_weight
)
for method in ("predict", "decision_function"):
if hasattr(base_estimator, method):
X_est_no_weight = getattr(est_no_weight, method)(X)
X_est_with_weight = getattr(est_with_weight, method)(X)
assert_allclose(X_est_no_weight, X_est_with_weight)
def test_n_support_oneclass_svr():
    # Make sure n_support_ is correct for OneClassSVM and SVR (used to be
    # non-initialized).
    # This is a non-regression test for issue #14774.
X = np.array([[0], [0.44], [0.45], [0.46], [1]])
clf = svm.OneClassSVM()
assert not hasattr(clf, 'n_support_')
clf.fit(X)
assert clf.n_support_ == clf.support_vectors_.shape[0]
assert clf.n_support_.size == 1
assert clf.n_support_ == 3
y = np.arange(X.shape[0])
reg = svm.SVR().fit(X, y)
assert reg.n_support_ == reg.support_vectors_.shape[0]
assert reg.n_support_.size == 1
assert reg.n_support_ == 4
@pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR])
def test_custom_kernel_not_array_input(Estimator):
"""Test using a custom kernel that is not fed with array-like for floats"""
data = ["A A", "A", "B", "B B", "A B"]
X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]]) # count encoding
y = np.array([1, 1, 2, 2, 1])
def string_kernel(X1, X2):
assert isinstance(X1[0], str)
n_samples1 = _num_samples(X1)
n_samples2 = _num_samples(X2)
K = np.zeros((n_samples1, n_samples2))
for ii in range(n_samples1):
for jj in range(ii, n_samples2):
K[ii, jj] = X1[ii].count('A') * X2[jj].count('A')
K[ii, jj] += X1[ii].count('B') * X2[jj].count('B')
K[jj, ii] = K[ii, jj]
return K
K = string_kernel(data, data)
assert_array_equal(np.dot(X, X.T), K)
svc1 = Estimator(kernel=string_kernel).fit(data, y)
svc2 = Estimator(kernel='linear').fit(X, y)
svc3 = Estimator(kernel='precomputed').fit(K, y)
assert svc1.score(data, y) == svc3.score(K, y)
assert svc1.score(data, y) == svc2.score(X, y)
if hasattr(svc1, 'decision_function'): # classifier
assert_allclose(svc1.decision_function(data),
svc2.decision_function(X))
assert_allclose(svc1.decision_function(data),
svc3.decision_function(K))
assert_array_equal(svc1.predict(data), svc2.predict(X))
assert_array_equal(svc1.predict(data), svc3.predict(K))
else: # regressor
assert_allclose(svc1.predict(data), svc2.predict(X))
assert_allclose(svc1.predict(data), svc3.predict(K))
| bsd-3-clause |
ericmjl/bokeh | examples/reference/models/multi_select_server.py | 1 | 1247 | ## Bokeh server for MultiSelect
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, MultiSelect
from bokeh.plotting import figure
x = [3, 4, 6, 12, 10, 1]
y = [7, 1, 3, 4, 1, 6]
label = ['Red', 'Orange', 'Red', 'Orange', 'Red', 'Orange']
df = pd.DataFrame({'x': x, 'y': y, 'label': label})  # create a dataframe for later use
source = ColumnDataSource(data=dict(x=x, y=y, label=label))
plot_figure = figure(title='Multi-Select',plot_height=450, plot_width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y',color='label', source=source, size=10)
multi_select = MultiSelect(title="Filter Plot by color:", value=["Red", "Orange"],
options=[("Red", "Red"), ("Orange", "Orange")])
def multiselect_click(attr, old, new):
    active_mselect = multi_select.value  # get the current multi-select value
    selected_df = df[df['label'].isin(active_mselect)]  # filter the dataframe by the selected colors
    source.data = dict(x=selected_df.x, y=selected_df.y, label=selected_df.label)
multi_select.on_change('value',multiselect_click)
layout=row(multi_select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Multi-Select Bokeh Server"
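# A quick note on running this example (not part of the original script): Bokeh
# server applications like this one are normally launched from the command line,
# e.g., assuming the file is saved as multi_select_server.py:
#
#   bokeh serve --show multi_select_server.py
#
# The --show flag is optional; it just opens the app in a browser tab.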
| bsd-3-clause |
alisidd/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
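# Illustrative sketch (not part of the original module): a hedged example of the
# shapes produced by _get_in_out_shape for a dense 100x5 feature matrix with
# integer labels, 3 classes and a batch size of 32. The numbers are made up.
def _example_get_in_out_shape():
  inp_shape, out_shape, batch = _get_in_out_shape((100, 5), (100,), n_classes=3,
                                                  batch_size=32)
  # inp_shape == [32, 5], out_shape == [32, 3], batch == 32
  return inp_shape, out_shape, batch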
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or iterable which returns dict), `n_classes[key]` is the number
      of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
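# Illustrative sketch (not part of the original module): a hedged, minimal use of
# setup_train_data_feeder with small in-memory numpy arrays. The data is made up;
# building and filling the placeholders assumes a default TensorFlow graph.
def _example_setup_train_data_feeder():
  x = np.array([[0., 1.], [1., 0.], [1., 1.], [0., 0.]], dtype=np.float32)
  y = np.array([0, 1, 1, 0])
  feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=2)
  feeder.input_builder()          # creates input/output placeholders
  feed_fn = feeder.get_feed_dict_fn()
  return feed_fn()                # dict mapping placeholder names to one batch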
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
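# Illustrative sketch (not part of the original module): batching a plain iterator
# of 1-D rows with _batch_data. With three made-up rows and batch_size=2 this
# yields two np.matrix chunks, of 2 rows and 1 row respectively.
def _example_batch_data():
  rows = iter([np.array([1., 2.]), np.array([3., 4.]), np.array([5., 6.])])
  shapes = [chunk.shape for chunk in _batch_data(rows, batch_size=2)]
  return shapes  # [(2, 2), (1, 2)]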
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
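# Illustrative sketch (not part of the original module): splitting a small dense
# matrix into prediction batches of size 2; the 5x2 matrix is made up.
def _example_setup_predict_data_feeder():
  x = np.arange(10, dtype=np.float32).reshape(5, 2)
  batches = setup_predict_data_feeder(x, batch_size=2)
  return [b.shape[0] for b in batches]  # [2, 2, 1]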
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
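# Illustrative sketch (not part of the original module): check_array converts a
# plain Python list to an ndarray of the requested dtype; the values are made up.
def _example_check_array():
  return check_array([1, 2, 3], dtype=np.float32)  # array([1., 2., 3.], dtype=float32)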
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
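# Illustrative sketch (not part of the original module): _access uses the same
# integer-location indexing for numpy arrays and pandas objects. Data is made up.
def _example_access():
  data = np.array([10, 20, 30])
  return _access(data, np.array([0, 2]))  # array([10, 30])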
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: Features, either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (
        # use y_is_dict here so a non-dict `y` paired with a dict `x` is handled
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
        if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
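# Illustrative sketch (not part of the original module): a DataFeeder built
# directly, inspecting its feed params before any batch is drawn. Values and
# shapes below are made up for illustration.
def _example_data_feeder_params():
  x = np.zeros((6, 3), dtype=np.float32)
  y = np.array([0, 1, 2, 0, 1, 2])
  feeder = DataFeeder(x, y, n_classes=3, batch_size=3, shuffle=False)
  return feeder.get_feed_params()  # {'epoch': 0, 'offset': 0, 'batch_size': 3}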
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It's common to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator each element of which returns one feature sample. Sample can
        be an Nd numpy matrix or a dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        an Nd numpy matrix or a dictionary of Nd numpy matrices holding either
        class ids or regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set
`None`, then assumes that iterator to return already batched element.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
    # Output types are floats, due to both softmax and regression requirements.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns one or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for the RNG. Note that it will mutate, so use
        an int value here if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
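# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal illustration of how DaskDataFeeder is expected to be driven. The
# file names and the n_classes value are hypothetical placeholders, and the
# sketch assumes dask.dataframe is installed.
def _dask_data_feeder_demo():
  import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
  x = dd.read_csv('features.csv')  # hypothetical feature frame
  y = dd.read_csv('labels.csv')  # hypothetical single-column label frame
  feeder = DaskDataFeeder(x, y, n_classes=3, batch_size=32)
  print(feeder.get_feed_params())  # e.g. {'batch_size': 32}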
| apache-2.0 |
nborwankar/opendatasci | notebooks/linreg.py | 4 | 6100 | # Supporting library for Lin Reg Notebooks
# Author: Nitin Borwankar
# Open Data Science Training
import warnings
# squelch an anaconda "bug" and some python verbosity
# this can move to system wide python if needed
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#import patsy
import statsmodels.api as sm
from numpy.random import RandomState
def make_standard_normal(seed=12345, size=20):
prng = RandomState(seed)
return prng.standard_normal(size)
def abline(intercept, gradient, *args, **kwargs):
global np, plt
a = plt.gca()
xlim = a.get_xlim()
ylim = a.get_ylim()
if args:
sty = args[0]
else:
sty = 'r'
if kwargs:
lw = kwargs['linewidth']
else:
lw = 1
a.plot(xlim, [intercept + gradient * x for x in xlim], sty, linewidth=lw)
a.set_xlim(xlim)
a.set_ylim(ylim)
return
def make_data_points(noise_factor=0.5,seed=12345):
global np, plt
sn = make_standard_normal(seed)
z = noise_factor*sn
x = np.linspace(0,5,20)
y = 2*x + 0.5 + z
return (x,y)
def make_single_plot(xarr,yarr,xlabel=None,ylabel=None,title=None, xinches=4, yinches=4, resize=False, show=True):
global np, plt
f, ax1 = plt.subplots(ncols=1,nrows=1)
if resize:
f.set_size_inches(xinches, yinches)
ax1 = plt.subplot(1,1,1)
if xlabel:
ax1.set_xlabel(xlabel)
if ylabel:
ax1.set_ylabel(ylabel)
if title:
ax1.set_title(title)
if show:
ax1.plot(xarr,yarr,'o')
return ax1
def lab_experiment(noise_factor=0.5,seed=12345, exptnum=0):
global np, plt
x, y = make_data_points(noise_factor,seed)
if exptnum:
title = "Lab Experiment %d"%(exptnum)
else:
title = "Lab Experiment"
ax = make_single_plot(x,y,'Force','Acceleration', title)
return ax
def lab_experiment_with_line(noise_factor=0.5, gradient_offset=0, intercept_offset=0, style='r', linewidth=1, seed=12345, exptnum=0):
x, y = make_data_points(noise_factor,seed)
if exptnum:
title = "Lab Experiment %d"%(exptnum)
else:
title = "Lab Experiment"
ax = make_single_plot(x,y,'Force','Acceleration', title)
# statsmodel lin reg
X = sm.add_constant(x)
model = sm.OLS(y,X)
gradient, intercept = model.fit().params
#ax.plot(x,y,'o')
abline(intercept + intercept_offset,gradient+gradient_offset,style,linewidth)
return
def lab_expt_1():
return lab_experiment_with_line(exptnum=1)
def lab_expt_2():
return lab_experiment_with_line(0.6,0.2,0.1,'g',1,123456,2)
def two_lab_experiments():
lab_expt_1()
lab_expt_2()
return
def linreg_example(x=None,y=None):
z = 0.65*make_standard_normal(size=20)
if x is None:
x = np.linspace(0,5,20)
y = 2*x + 0.5 + z
X = sm.add_constant(x)
model = sm.OLS(y,X)
gradient, intercept = model.fit().params
plt.plot(x,y,'o')
abline(intercept,gradient,'r',linewidth=1)
print "Intercept = %f" % (intercept)
print "Slope = %f"% (gradient)
def linreg_sm(x,y):
X = sm.add_constant(x)
model = sm.OLS(y,X)
gradient, intercept = model.fit().params
return (gradient, intercept)
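# Hedged usage sketch (added; not from the original notebooks): fit the
# linreg_sm() helper on noise-free synthetic data and return the recovered
# (slope, intercept), which should be close to (2.0, 0.5).
def linreg_sm_demo():
    x = np.linspace(0, 5, 20)
    y = 2*x + 0.5
    return linreg_sm(x, y)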
############### Exploration ##############
def make_hist():
plt.figure()
loansmin = pd.read_csv('../datasets/loanf.csv')
fico = loansmin['FICO.Score']
p = fico.hist()
def make_boxplot():
plt.figure()
loansmin = pd.read_csv('../datasets/loanf.csv')
p = loansmin.boxplot('Interest.Rate','FICO.Score')
q = p.set_xticklabels(['640','','','','660','','','','680','','','','700',
'720','','','','740','','','','760','','','','780','','','','800','','','','820','','','','840'])
q0 = p.set_xlabel('FICO Score')
q1 = p.set_ylabel('Interest Rate %')
q2 = p.set_title(' ')
############## Analysis ###############
"""
# this goes as full script in notebook
import pylab as pl
import numpy as np
#from sklearn import datasets, linear_model
import pandas as pd
import statsmodels.api as sm
# import the cleaned up dataset
df = pd.read_csv('../datasets/loanf.csv')
intrate = df['Interest.Rate']
loanamt = df['Loan.Amount']
fico = df['FICO.Score']
# reshape the data from a pandas Series to columns
# the dependent variable
y = np.matrix(intrate).transpose()
# the independent variables shaped as columns
x1 = np.matrix(fico).transpose()
x2 = np.matrix(loanamt).transpose()
# put the two columns together to create an input matrix
# if we had n independent variables we would have n columns here
x = np.column_stack([x1,x2])
# create a linear model and fit it to the data
X = sm.add_constant(x)
model = sm.OLS(y,X)
f = model.fit()
print 'Coefficients: ', f.params[0:2]
print 'Intercept: ', f.params[2]
print 'P-Values: ', f.pvalues
print 'R-Squared: ', f.rsquared
"""
############## Candidate code snippets below - we may need these in future iterations ###############
"""
from scipy import stats
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> gradient, intercept, r_value, p_value, std_err = stats.linregress(x,y)
"""
"""
from numpy import arange,array,ones,linalg
from pylab import plot,show
xi = arange(0,9)
A = array([ xi, ones(9)])
# linearly generated sequence
y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24]
w = linalg.lstsq(A.T,y)[0] # obtaining the parameters
# plotting the line
line = w[0]*xi+w[1] # regression line
plot(xi,line,'r-',xi,y,'o')
show()
"""
"""
from numpy import arange,array,ones,linalg
from pylab import plot,show
xi = arange(0,9)
A = array([ xi, ones(9)])
# linearly generated sequence
y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24]
w = linalg.lstsq(A.T,y)[0] # obtaining the parameters
# plotting the line
line = w[0]*xi+w[1] # regression line
plot(xi,line,'r-',xi,y,'o')
show()
"""
| bsd-2-clause |
tmhm/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
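# Hedged aside (added; not part of the original example): LinearSVC's objective
# can be brought closer to SVC(kernel='linear') by selecting the regular hinge
# loss; the loss='hinge' option is assumed to be available in this scikit-learn
# version. The extra model is fitted for illustration only and is not plotted.
lin_svc_hinge = svm.LinearSVC(C=C, loss='hinge').fit(X, y)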
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
scotgl/sonify | DataStethoscope/dep/scripts/bow.py | 5 | 1550 | from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
from gtts import gTTS
import os
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import pandas as pd
import ctcsound
pan = 0
index = 10
cs = ctcsound.Csound()
csd = '''
<CsoundSynthesizer>
<CsOptions>
-odac -d
</CsOptions>
<CsInstruments>
sr = 44100
ksmps = 32
nchnls = 2
0dbfs = 1
instr 1
;aMod1 poscil 200, 700, 1
aMod1 poscil p4, p5, 1 ; p4 = amp1, p5 = f1, p6 = amp2, p7 = f2
;aMod2 poscil 1800, 290, 1
aMod2 poscil p6, p7, 1
kenv linen p9 , 0.3 , p3, p9
aSig poscil kenv, 440+aMod1+aMod2, 1
outs aSig*(1-p8), aSig*p8
endin
</CsInstruments>
<CsScore>
f 0 14400
f 1 0 1024 10 1
</CsScore>
</CsoundSynthesizer>
'''
cs.compileCsdText(csd)
cs.start()
pt = ctcsound.CsoundPerformanceThread(cs.csound())
pt.play()
def f(percentage):
global index
index = percentage
def on_button_clicked(b):
in_min = 0
in_max = 100
out_min=690
out_max = 710
global index
tts = gTTS(text=(str(index)+'percent'), lang='en')
tts.save("num.mp3")
os.system("afplay num.mp3")
if (index>50):
pan = 1
if (index<50):
pan = 0
if (index==50):
pan = 0.5
freq = (index - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
#print(freq)
pt.scoreEvent(False, 'i', (1, 0, 4, 200, 700, 200, freq, pan, 0.5))
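# Hedged usage sketch (added; not in the original script): how the slider and
# button callback are assumed to be wired up in the notebook; the widget labels
# are hypothetical.
slider = interactive(f, percentage=widgets.IntSlider(min=0, max=100, value=10))
button = widgets.Button(description='Sonify')
button.on_click(on_button_clicked)
display(slider, button)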
| gpl-3.0 |
Garrett-R/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
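# Hedged aside (added; not in the original example): the two embeddings can be
# compared through their fitted stress values, assuming the `stress_` attribute
# of sklearn's MDS estimator.
print("Metric MDS stress: %.2f" % mds.stress_)
print("Non-metric MDS stress: %.2f" % nmds.stress_)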
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
tdhopper/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
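# Hedged aside (added; not in the original example): each column of `coefs`
# corresponds to one breakpoint of the piecewise-linear path, so the number of
# LARS steps can be inspected directly.
print("Number of breakpoints in the path: %d" % len(alphas))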
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
CompPhysics/ComputationalPhysicsMSU | doc/src/pde/Programs/python/diffanimate.py | 3 | 3943 | # Code for solving the 1+1 dimensional diffusion equation
# du/dt = ddu/ddx on a rectangular grid of size L x (T*dt),
# with with L = 1, u(x,0) = g(x), u(0,t) = u(L,t) = 0
import numpy, sys, math
import matplotlib.animation as animation
import time, glob, os
from matplotlib import pyplot as plt
import numpy as np
def forward_step(alpha,u,uPrev,N):
"""
Steps forward-euler algo one step ahead.
Implemented in a separate function for code-reuse from crank_nicolson()
"""
for x in xrange(1,N+1): #loop from i=1 to i=N
u[x] = alpha*uPrev[x-1] + (1.0-2*alpha)*uPrev[x] + alpha*uPrev[x+1]
def forward_euler(alpha,u,N,T):
"""
    Implements the forward Euler scheme; results are saved to
    array u.
"""
#Skip boundary elements
for t in xrange(1,T):
forward_step(alpha,u[t],u[t-1],N)
def tridiag(alpha,u,N):
"""
    Tridiagonal Gauss eliminator, specialized to diagonal = 1+2*alpha,
    super- and sub-diagonal = -alpha
"""
d = numpy.zeros(N) + (1+2*alpha)
b = numpy.zeros(N-1) - alpha
#Forward eliminate
for i in xrange(1,N):
#Normalize row i (i in u convention):
b[i-1] /= d[i-1];
u[i] /= d[i-1] #Note: row i in u = row i-1 in the matrix
d[i-1] = 1.0
#Eliminate
u[i+1] += u[i]*alpha
d[i] += b[i-1]*alpha
#Normalize bottom row
u[N] /= d[N-1]
d[N-1] = 1.0
#Backward substitute
for i in xrange(N,1,-1): #loop from i=N to i=2
u[i-1] -= u[i]*b[i-2]
#b[i-2] = 0.0 #This is never read, why bother...
def backward_euler(alpha,u,N,T):
"""
    Implements the backward Euler scheme by Gauss elimination of the tridiagonal matrix.
Results are saved to u.
"""
for t in xrange(1,T):
u[t] = u[t-1].copy()
tridiag(alpha,u[t],N) #Note: Passing a pointer to row t, which is modified in-place
def crank_nicolson(alpha,u,N,T):
"""
    Implements the Crank-Nicolson scheme, reusing code from forward and backward Euler.
"""
for t in xrange(1,T):
forward_step(alpha/2,u[t],u[t-1],N)
tridiag(alpha/2,u[t],N)
def g(x):
"""Initial condition u(x,0) = g(x), x \in [0,1]"""
return numpy.sin(math.pi*x)
# Number of integration points along x-axis
N = 100
# Step length in time
dt = 0.01
# Number of time steps till final time
T = 100
# Define method to use 1 = explicit scheme, 2= implicit scheme, 3 = Crank-Nicolson
method = 2
dx = 1/float(N+1)
alpha = dt/(dx**2)
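# Hedged aside (added; not in the original script): the explicit forward Euler
# scheme (method 1) is only stable for alpha = dt/dx^2 <= 0.5, so warn if the
# chosen resolution violates that bound.
if method == 1 and alpha > 0.5:
    print("Warning: alpha = %g > 0.5; forward Euler will be unstable" % alpha)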
x = np.linspace(0, 1, N+2)
t = np.linspace(0, 1, T)
u = numpy.zeros((T,N+2),numpy.double)
#Initial codition
u[0,:] = g(x)
u[0,0] = u[0,N+1] = 0.0 #Implement boundaries rigidly
if method == 1:
forward_euler(alpha,u,N,T)
elif method == 2:
backward_euler(alpha,u,N,T)
elif method == 3:
crank_nicolson(alpha,u,N,T)
else:
print "Please select method 1,2, or 3!"
import sys
sys.exit(0)
# Make a movie of the results
# Make a first plot (save the lines objects returned from plt.plot)
fig = plt.figure()
plt.axis([0, 1, 0, 10])
lines = plt.plot([], [])
plt.xlabel('x')
plt.ylabel('u')
# Function to return the background plot in the animation
def init():
lines[0].set_data([], []) # empty plot
return lines
# Function to return a frame in the movie
def frame(args):
frame_no, s, x, lines = args
    lines[0].set_data(x, u[frame_no])  # plot the solution at time step frame_no
# Does not work: lines[0].set_label('s=%4.2f' % s)
# Does not work: plt.legend(['s=%4.2f' % s])
# Does not work: plt.savefig('tmp_%04d.png' % frame_no)
return lines
# Construct list of all arguments to frame function
# (each call sends frame number, s value, x array, and lines list)
all_args = [(frame_no, s, x, lines)
for frame_no, s in enumerate(t)]
# Run the animation
anim = animation.FuncAnimation(
fig, frame, all_args, interval=150, init_func=init, blit=True)
# Make movie file in MP4 format
anim.save('movie1.mp4', fps=5)
plt.show()
| cc0-1.0 |
ifrenzyc/itrace_startup_in_python | training.py | 1 | 4333 | # -*- coding: utf-8 -*-
import numpy as np
import urllib
# url with dataset
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
# download the file
raw_data = urllib.urlopen(url)
# load the CSV file as a numpy matrix
dataset = np.loadtxt(raw_data, delimiter=",")
# separate the data from the target attributes
X = dataset[:,0:7]
y = dataset[:,8]
# Data Normalization
from sklearn import preprocessing
#scale the data attributes
scaled_X = preprocessing.scale(X)
# normalize the data attributes
normalized_X = preprocessing.normalize(X)
# standardize the data attributes
standardized_X = preprocessing.scale(X)
# Feature Selection
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(X, y)
# display the relative importance of each attribute
print(model.feature_importances_)
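# Hedged addition (not in the original script): recursive feature elimination
# is another common scikit-learn route to feature selection, sketched here for
# illustration.
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
rfe = RFE(LogisticRegression(), n_features_to_select=3)
rfe = rfe.fit(X, y)
# summarize the selected attributes
print(rfe.support_)
print(rfe.ranking_)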
# Logistic Regression (see the official documentation)
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X, y)
print('MODEL')
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print('RESULT')
print(metrics.classification_report(expected, predicted))
print('CONFUSION MATRIX')
print(metrics.confusion_matrix(expected, predicted))
# Naive Bayes (see the official documentation)
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y)
print('MODEL')
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print('RESULT')
print(metrics.classification_report(expected, predicted))
print('CONFUSION MATRIX')
print(metrics.confusion_matrix(expected, predicted))
# k-Nearest Neighbors (see the official documentation)
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
# fit a k-nearest neighbor model to the data
model = KNeighborsClassifier()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
# Decision Tree (see the official documentation)
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
# fit a CART model to the data
model = DecisionTreeClassifier()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
# Support Vector Machine (see the official documentation)
from sklearn import metrics
from sklearn.svm import SVC
# fit a SVM model to the data
model = SVC()
model.fit(X, y)
print(model)
# make predictions
expected = y
predicted = model.predict(X)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
# GridSearchCV (see the official documentation for module usage and underlying principles)
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.grid_search import GridSearchCV
# prepare a range of alpha values to test
alphas = np.array([1,0.1,0.01,0.001,0.0001,0])
# create and fit a ridge regression model, testing each alpha
model = Ridge()
grid = GridSearchCV(estimator=model, param_grid=dict(alpha=alphas))
grid.fit(X, y)
print(grid)
# summarize the results of the grid search
print(grid.best_score_)
print(grid.best_estimator_.alpha)
# RandomizedSearchCV (see the official documentation for module usage and underlying principles)
import numpy as np
from scipy.stats import uniform as sp_rand
from sklearn.linear_model import Ridge
from sklearn.grid_search import RandomizedSearchCV
# prepare a uniform distribution to sample for the alpha parameter
param_grid = {'alpha': sp_rand()}
# create and fit a ridge regression model, testing random alpha values
model = Ridge()
rsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100)
rsearch.fit(X, y)
print(rsearch)
# summarize the results of the random parameter search
print(rsearch.best_score_)
print(rsearch.best_estimator_.alpha)
| mit |
ilo10/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
dcolombo/FilFinder | examples/paper_figures/ks_plots.py | 3 | 1672 | # Licensed under an MIT open source license - see LICENSE
'''
KS p-values for different properties.
'''
import numpy as np
from pandas import read_csv
import matplotlib.pyplot as p
import numpy as np
import seaborn as sn
sn.set_context('talk')
sn.set_style('ticks')
# sn.mpl.rc("figure", figsize=(7, 9))
# Widths
widths = read_csv("width_ks_table_pvals.csv")
widths.index = widths["Unnamed: 0"]
del widths["Unnamed: 0"]
widths_arr = np.asarray(widths)
widths_arr[np.arange(0, 14), np.arange(0, 14)] = 1.0
widths_arr = -np.log10(widths_arr)
# p.figure(figsize=(12, 10))
p.subplot(111)
# p.xlabel("Widths")
p.imshow(widths_arr, origin='lower', cmap='binary', interpolation='nearest')
p.xticks(np.arange(0, 14), widths.columns, rotation=90)
# p.xticks(np.arange(0, 14), [], rotation=90)
p.yticks(np.arange(0, 14), widths.columns)
# p.figtext(0.05, 0.95, "a)", fontsize=20)
cb = p.colorbar()
cb.set_label(r'$-\log_{10}$ p-value')
cb.solids.set_edgecolor("face")
p.tight_layout()
p.show()
# Curvature
# curve = read_csv("curvature_ks_table_pvals.csv")
# curve.index = curve["Unnamed: 0"]
# del curve["Unnamed: 0"]
# curve_arr = np.asarray(curve)
# curve_arr[np.arange(0, 14), np.arange(0, 14)] = 1.0
# curve_arr = -np.log10(curve_arr)
# # p.figure(figsize=(12, 10))
# p.subplot(212)
# # p.xlabel("Curvature")
# p.imshow(curve_arr, interpolation='nearest', origin='lower', cmap='binary')
# p.xticks(np.arange(0, 14), curve.columns, rotation=90)
# p.yticks(np.arange(0, 14), curve.columns)
# p.figtext(0.05, 0.55, "b)", fontsize=20)
# cb = p.colorbar()
# cb.set_label(r'$-\log_{10}$ p-value')
# cb.solids.set_edgecolor("face")
# p.tight_layout()
# p.show()
| mit |
mira67/DL4RS | MPFprj/visdata.py | 1 | 1659 | """
Visualization Module
Author: Qi Liu
"""
import os
import MySQLdb
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
plt.style.use(u'seaborn-paper')
def vis_train(history,predicted,gt):
fs = 14
#train/validation cost curve
plt.figure(1)
gs = GridSpec(2, 2)
plt.subplot(gs[0, :])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('loss',fontsize=fs, color='black')
plt.xlabel('epoch',fontsize=fs, color='black')
plt.legend(['train', 'validation'], loc='upper left')
plt.tick_params(axis='both', which='major', labelsize=fs)
#predicted vs ground truth correlation
t = np.arange(0, len(gt), 1)
plt.subplot(gs[1, 0])
plt.scatter(predicted[:,0], gt[:,0])
axes = plt.gca()
m, b = np.polyfit(predicted[:,0], gt[:,0], 1)
X_plot = np.linspace(axes.get_xlim()[0],axes.get_xlim()[1],100)
r = np.corrcoef(predicted[:,0], gt[:,0])[1,0]
plt.plot(X_plot, m*X_plot + b, 'r-',label="r = %2.2f"%(r))
plt.legend(loc=2)
plt.ylabel('GT MPF(%)',fontsize=fs, color='black')
plt.xlabel('Predicted MPF(%)',fontsize=fs, color='black')
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.subplot(gs[1, 1])
plt.plot(t, gt[:,0], 'b-', label='Ground Truth')
plt.plot(t, predicted[:,0], 'r-',label='Predicted')
plt.ylabel('MPF(%)',fontsize=fs, color='black')
plt.xlabel('Grid Index, Not ordered',fontsize=fs, color='black')
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(loc=2)
plt.show()
#def vis_test(history,predicted,gt):
| gpl-3.0 |
linebp/pandas | pandas/tests/series/test_combine_concat.py | 7 | 11771 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, date_range, DatetimeIndex
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesCombine(TestData):
def test_append(self):
appendedSeries = self.series.append(self.objSeries)
for idx, value in compat.iteritems(appendedSeries):
if idx in self.series.index:
assert value == self.series[idx]
elif idx in self.objSeries.index:
assert value == self.objSeries[idx]
else:
self.fail("orphaned index!")
pytest.raises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_append_duplicates(self):
# GH 13677
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])
exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(s1.append(s2), exp)
tm.assert_series_equal(pd.concat([s1, s2]), exp)
# the result must have RangeIndex
exp = pd.Series([1, 2, 3, 4, 5, 6])
tm.assert_series_equal(s1.append(s2, ignore_index=True),
exp, check_index_type=True)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True),
exp, check_index_type=True)
msg = 'Indexes have overlapping values:'
with tm.assert_raises_regex(ValueError, msg):
s1.append(s2, verify_integrity=True)
with tm.assert_raises_regex(ValueError, msg):
pd.concat([s1, s2], verify_integrity=True)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
tm.assert_series_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
assert np.isfinite(combined).all()
tm.assert_series_equal(combined[::2], series[::2])
tm.assert_series_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_series_equal(strings, combined.loc[index[::2]])
tm.assert_series_equal(floats[1::2].astype(object),
combined.loc[index[1::2]])
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_update(self):
s = Series([1.5, nan, 3., 4., nan])
s2 = Series([nan, 3.5, nan, 5.])
s.update(s2)
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
# this will fail as long as series is a sub-class of ndarray
# df['c'].update(Series(['foo'],index=[0])) #####
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ['float64', 'int8', 'uint8', 'bool', 'm8[ns]',
'M8[ns]'])
for dtype in dtypes:
assert pd.concat([Series(dtype=dtype)]).dtype == dtype
assert pd.concat([Series(dtype=dtype),
Series(dtype=dtype)]).dtype == dtype
def int_result_type(dtype, dtype2):
typs = set([dtype.kind, dtype2.kind])
if not len(typs - set(['i', 'u', 'b'])) and (dtype.kind == 'i' or
dtype2.kind == 'i'):
return 'i'
elif not len(typs - set(['u', 'b'])) and (dtype.kind == 'u' or
dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
typs = set([dtype.kind, dtype2.kind])
if not len(typs - set(['f', 'i', 'u'])) and (dtype.kind == 'f' or
dtype2.kind == 'f'):
return 'f'
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return 'O'
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)
]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes(self):
# booleans
assert pd.concat([Series(dtype=np.bool_),
Series(dtype=np.int32)]).dtype == np.int32
assert pd.concat([Series(dtype=np.bool_),
Series(dtype=np.float32)]).dtype == np.object_
# datetime-like
assert pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.bool)]).dtype == np.object_
assert pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.int64)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.int64)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool_),
Series(dtype=np.int64)]).dtype == np.object_
# categorical
assert pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype == 'category'
assert pd.concat([Series(dtype='category'),
Series(dtype='float64')]).dtype == 'float64'
assert pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype == 'object'
# sparse
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='float64').to_sparse()])
assert result.dtype == np.float64
assert result.ftype == 'float64:sparse'
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='float64')])
assert result.dtype == np.float64
assert result.ftype == 'float64:sparse'
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='object')])
assert result.dtype == np.object_
assert result.ftype == 'object:dense'
def test_combine_first_dt64(self):
from pandas.core.tools.datetimes import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
rs = s0.combine_first(s1)
xp = to_datetime(Series(['2010', '2011']))
assert_series_equal(rs, xp)
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = Series([np.NaN, "2011"])
rs = s0.combine_first(s1)
xp = Series([datetime(2010, 1, 1), '2011'])
assert_series_equal(rs, xp)
class TestTimeseries(object):
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
tm.assert_index_equal(result.index, ex_index)
tm.assert_index_equal(result_df.index, ex_index)
appended = rng.append(rng)
tm.assert_index_equal(appended, ex_index)
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
tm.assert_index_equal(appended, ex_index)
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
assert rng1.append(rng1).name == 'foo'
assert rng1.append(rng2).name is None
def test_append_concat_tz(self):
# see gh-2938
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_concat_tz_explicit_pytz(self):
# see gh-2938
from pytz import timezone as timezone
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz=timezone('US/Eastern'))
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz=timezone('US/Eastern'))
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz=timezone('US/Eastern'))
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_concat_tz_dateutil(self):
# see gh-2938
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='dateutil/US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='dateutil/US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='dateutil/US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
| bsd-3-clause |
aseciwa/independent-study | scripts/tweet_analysis.py | 1 | 9579 | from tweet_preprocess import load_df
from textblob import TextBlob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import cartopy
# set graph display options
pd.set_option('display.max_colwidth', 200)
pd.options.display.mpl_style = 'default'
matplotlib.style.use('ggplot')
sns.set_context('talk')
sns.set_style('darkgrid')
# load captured tweets
df = load_df('/Users/alanseciwa/Desktop/results3.csv')
# See the overall count relating to the keys
df.info()
# prints out first row from tweets
print(df[['candidate', 'created_at', 'lang', 'place', 'user_followers_count',
'user_time_zone', 'polarity', 'influenced_polarity', 'text']].head(1))
# find polarity of ONLY english words and display the set
# the textblob function translate() could be used
english_df = df[df.lang == 'en']
english_df.sort('polarity', ascending=False).head(3)[['candidate', 'polarity', 'subjectivity', 'text']]
# Find mean polarity for each candidate by looking at the influenced_polarity.
# this takes into account the number of retweets and number of followers
candidate_group = english_df.groupby('candidate')
print(candidate_group[['polarity', 'influence', 'influenced_polarity']].mean())
# Look at the influential Tweets about Donald Trump and Bernie Sanders
'''
jeb = candidate_group.get_group('Jeb Bush')
jeb_influence = jeb.sort('influence', ascending=False)
print('')
print('-----------')
print('Jeb Bush')
print('-----------')
print(jeb_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5))
print('')
print('-----------')
print(df[df.user_name == 'Jeb Bush'].groupby('candidate').size())
'''
# Trump
trump = candidate_group.get_group('Donald Trump')
trump_influence = trump.sort('influence', ascending=False)
print('--------------')
print('Donald Trump')
print('--------------')
trump_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5)
# Sanders
sanders = candidate_group.get_group('Bernie Sanders')
sanders_influence = sanders.sort('influence', ascending=False)
print('--------------')
print('Bernie Sanders')
print('--------------')
sanders_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5)
# LANGUAGE
# display tweet counts by candidate and language
print('')
print('Language')
lang_group = df.groupby(['candidate', 'lang'])
print(lang_group.size())
# graph the languages
print('')
l_lang = lang_group.filter(lambda group: len(group) > 10)
# get rid of english language
non_eng = l_lang[l_lang.lang != 'en']
non_eng_grp = non_eng.groupby(['lang', 'candidate'], as_index = False)
non_eng_grp
print('')
print('ploting...')
s = non_eng_grp.text.agg(np.size)
s = s.rename(columns={'text': 'count'})
s_pivot_dis = s.pivot_table(index='lang', columns='candidate', values='count', fill_value=0)
plot = sns.heatmap(s_pivot_dis)
plot.set_title('Number of non-English Tweets by Candidate')
plot.set_ylabel('language code')
plot.set_xlabel('candidate')
plot.figure.set_size_inches(12, 7)
print('')
print('ending plotting')
# Influenced polarity over time for each candidate
mean_pol = df.groupby(['candidate', 'created_at']).influenced_polarity.mean()
plot = mean_pol.unstack('candidate').resample('60min').plot()
plot.set_title('Influence Polarity Over Time for Candidates')
plot.set_ylabel('Influence Polarity')
plot.set_xlabel('Time')
plot.figure.set_size_inches(15, 9)
# Get top languages
lang_size =df.groupby('lang').size()
th = lang_size.quantile(.75)
top_lang_df = lang_size[lang_size > th]
top_lang = set(top_lang_df.index) - {'und'}
print(top_lang)
# Get tweet frequency
df['hour'] = df.created_at.apply(lambda datetime: datetime.hour)
for lang_code in top_lang:
l_df = df[df.lang == lang_code]
normalized_freq = l_df.groupby('hour').size() / l_df.lang.count()
plot = normalized_freq.plot(label = lang_code)
plot.set_title('Tweet Frequency by hour of day')
plot.set_ylabel('frequency')
plot.set_xlabel('hr of day')
plot.legend()
plot.figure.set_size_inches(10, 8)
# check how many tweets and unique users make up the spike
spike_interest = df[(df.hour == 23) & (df.lang == 'in')]
print('Number of tweets:', spike_interest.text.count())
print('Number of unique users:', spike_interest.user_name.unique().size)
#investigate spike from Indonesia
spike_interest.text.head(10).unique()
# Find the Timezone of tweets in different locations with Influenced_Polarity
timez_df = english_df.dropna(subset=['user_time_zone'])
us_timez_df = timez_df[timez_df.user_time_zone.str.contains('US & Canada')]
us_timez_candidate_group = us_timez_df.groupby(['candidate', 'user_time_zone'])
us_timez_candidate_group.influenced_polarity.mean()
# Graph timezone on a map
timez_map = cartopy.io.shapereader.Reader("/Users/alanseciwa/Desktop/World_Maps/tz_world_mp.shp")
timez_rec = list(timez_map.records())
timez_trans = {
'Eastern Time (US & Canada)': 'America/New_York',
'Central Time (US & Canada)': 'America/Chicago',
'Mountain Time (US & Canada)': 'America/Denver',
'Pacific Time (US & Canada)': 'America/Los_Angeles',
}
america_timez_rec = {
timez_name: next(filter(lambda record: record.attributes['TZID'] == timez_id, timez_rec))
for timez_name, timez_id
in timez_trans.items()
}
# -----
aea = cartopy.crs.AlbersEqualArea(-95, 35)
pc = cartopy.crs.PlateCarree()
state_province = cartopy.feature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none'
)
c_map = [matplotlib.cm.Blues, matplotlib.cm.Greens, matplotlib.cm.Reds, matplotlib.cm.Oranges]
norm = matplotlib.colors.Normalize(vmin=0, vmax=40)
candidates = df['candidate'].unique()
for i, c in enumerate(candidates):
plt.figure()
plot = plt.axes(projection=aea)
plot.set_extent((-125, -66, 20, 50))
plot.add_feature(cartopy.feature.LAND)
plot.add_feature(cartopy.feature.COASTLINE)
plot.add_feature(cartopy.feature.BORDERS)
plot.add_feature(state_province, edgecolor='gray')
plot.add_feature(cartopy.feature.LAKES, facecolor='#00BCD4')
for j, r in america_timez_rec.items():
timez_spec_df = us_timez_df[us_timez_df.user_time_zone == j]
timez_cand_spec_df = timez_spec_df[timez_spec_df.candidate == c]
mean_pol = timez_cand_spec_df.influenced_polarity.mean()
plot.add_geometries(
[r.geometry],
crs = pc,
color = c_map[i](norm(mean_pol)),
alpha = 0.8
)
plot.set_title('Influenced Polarity towards {} by U.S. Timezone'.format(c))
plot.figure.set_size_inches(7, 4)
plt.show()
print()
# Find the Twitter users outside of the U.S.
american_timez = ('US & Canada|Canada|Arizona|America|Hawaii|Indiana|Alaska'
'|New_York|Chicago|Los_Angeles|Detroit|CST|PST|EST|MST')
foreign_timez_df = timez_df[~timez_df.user_time_zone.str.contains(american_timez)]
foreign_timez_grp = foreign_timez_df.groupby('user_time_zone')
foreign_timez_grp.size().sort(inplace=False, ascending=False).head(25)
# find Foreign timezones and influenced_polarity of candidates
foreign_english_timez_df = foreign_timez_df[foreign_timez_df.lang == 'en']
foreign_timez_grp2 = foreign_english_timez_df.groupby(['candidate', 'user_time_zone'])
top_foreign_timez_df = foreign_timez_grp2.filter(lambda group: len(group) > 40)
top_foreign_timez_grp = top_foreign_timez_df.groupby(['user_time_zone', 'candidate'], as_index=False)
mean_infl_pol = top_foreign_timez_grp.influenced_polarity.mean()
pivot = mean_infl_pol.pivot_table(
index='user_time_zone',
columns='candidate',
values='influenced_polarity',
fill_value=0
)
plot = sns.heatmap(pivot)
plot.set_title('Influenced Polarity in Major Foreign (timezones) Regions by Candidate')
plot.set_ylabel('City', family='Ubuntu')
plot.set_xlabel('Influenced Polarity by Candidate')
plot.figure.set_size_inches(10, 9)
# Find the Geolocation of Tweets made
geo_df = df.dropna(subset=['place'])
mollweide = cartopy.crs.Mollweide()
plot = plt.axes(projection=mollweide)
plot.set_global()
plot.add_feature(cartopy.feature.LAND)
plot.add_feature(cartopy.feature.COASTLINE)
plot.add_feature(cartopy.feature.BORDERS)
plot.scatter(
list(geo_df.longitude),
list(geo_df.latitude),
transform=pc,
zorder=2
)
plot.set_title('International Twitter Users W/Enabled Geo Data')
plot.figure.set_size_inches(14, 9)
# Plot Twitter user in the US
plot = plt.axes(projection=aea)
## this sets the visible extent of the map; adjust these
## coordinates if other portions of the map need to be shown
plot.set_extent((-150, 60, -25, 60))
# TODO: state border lines are not showing
plot.add_feature(state_province, edgecolor='black')
plot.add_feature(cartopy.feature.COASTLINE)
plot.add_feature(cartopy.feature.LAND)
plot.add_feature(cartopy.feature.BORDERS)
plot.add_feature(cartopy.feature.LAKES)
candidate_grp2 = geo_df.groupby('candidate', as_index = False)
# Colors for the legend table
colors = ['#DC143C', '#0000FF', '#FFD700', '#9932CC']
# Go through loop to display the coordinates
for i, (can, grp) in enumerate(candidate_grp2):
longitudes = grp.longitude.values
latitudes = grp.latitude.values
plot.scatter(
longitudes,
latitudes,
transform=pc,
color=colors[i],
label=can,
zorder=2
)
plot.set_title('Twitter Users by Candidate')
plt.legend(loc='lower right')
plot.figure.set_size_inches(12, 7) | mit |
justincassidy/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
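# Hedged aside (added; not in the original example): the estimators fitted on
# the last simulated sample remain bound after the loop, so their shrinkage
# coefficients can be inspected directly.
print("Ledoit-Wolf shrinkage on the last sample: %.3f" % lw.shrinkage_)
print("OAS shrinkage on the last sample: %.3f" % oa.shrinkage_)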
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
Djabbz/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
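# Hedged aside (added; not in the original example): an isotonic calibrator can
# be fitted and scored the same way for comparison.
iso_clf = CalibratedClassifierCV(clf, method="isotonic", cv="prefit")
iso_clf.fit(X_valid, y_valid)
iso_score = log_loss(y_test, iso_clf.predict_proba(X_test))
print("Log-loss with isotonic calibration: %.3f" % iso_score)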
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
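# Each class has its own sigmoid calibrator; the per-class calibrated values
# computed below are then renormalized so that every probability vector sums
# to one again.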
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
lekshmideepu/nest-simulator | pynest/examples/twoneurons.py | 8 | 2663 | # -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Two neuron example
------------------
This script simulates two connected pre- and postsynaptic neurons.
The presynaptic neuron receives a constant external current,
and the membrane potentials of both neurons are recorded.
See Also
~~~~~~~~
:doc:`one_neuron`
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and plotting.
# Additionally, we set the verbosity to suppress info messages and reset
# the kernel.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, we create the two neurons and the recording device.
neuron_1 = nest.Create("iaf_psc_alpha")
neuron_2 = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, we set the external current of neuron 1.
neuron_1.I_e = 376.0
###############################################################################
# Fourth, we connect neuron 1 to neuron 2.
# Then, we connect a voltmeter to the two neurons.
# To learn more about the previous steps, please check out the
# :doc:`one neuron example <one_neuron>`.
weight = 20.0
delay = 1.0
nest.Connect(neuron_1, neuron_2, syn_spec={"weight": weight, "delay": delay})
nest.Connect(voltmeter, neuron_1)
nest.Connect(voltmeter, neuron_2)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neurons' membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
rm--/matplotlib_examples | example1/evaluate1.py | 1 | 1327 | #!/usr/bin/env python
import matplotlib.pyplot as plt
# dict: {title of plot : [measure value files]}
# The input files must not end with a trailing newline (the parser below
# would fail on an empty last line).
inputFiles = {'LibMergeSort_Sortierszenarien_im_Vergleich':
['sorted', 'shuffle', 'reverse']}
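# Each measurement file is expected to contain whitespace-separated rows of
# the form "N runtime error", one row per problem size, e.g. (illustrative
# values only):
#   1000 0.012 0.001
#   2000 0.025 0.002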
# different colors of the function graphs
COLORS = ['g', 'k', 'm']
print(inputFiles.items())
counter = 0
for outputFileName, fileNames in inputFiles.items():
fig = plt.figure()
ax1 = fig.add_subplot(111)
for fileName in fileNames:
with open(fileName) as f:
data = f.read()
data = data.split('\n')
#print(str(fileName) + str(data))
x = [row.split()[0] for row in data]
y = [float(row.split()[1]) for row in data]
err = [float(row.split()[2]) for row in data]
ax1.plot(x, y, c=COLORS[counter], label=fileName)
ax1.errorbar(x, y, yerr=err, fmt='_',
ecolor=COLORS[counter], capthick=2)
counter = counter + 1
# ax1.set_title(outputFileName)
ax1.set_xlabel('Anzahl Elemente N')
ax1.set_ylabel('Laufzeit [s]')
leg = ax1.legend(loc='upper left')
#leg = ax1.legend(loc='lower right')
# ax1.set_yscale('log')
#fig.savefig(outputFileName + '.png', format='png')
fig.savefig(outputFileName + '.pdf', format='pdf')
# plt.show()
| gpl-2.0 |
moberweger/deep-prior-pp | src/main_nyu_posereg_embedding.py | 1 | 9266 | """This is the main file for training a hand joint classifier on the NYU dataset
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import matplotlib
matplotlib.use('Agg') # plot to file
import matplotlib.pyplot as plt
import os
import cPickle
from sklearn.decomposition import PCA
from trainer.poseregnettrainer import PoseRegNetTrainer, PoseRegNetTrainerParams
from net.poseregnet import PoseRegNetParams, PoseRegNet
from data.importers import NYUImporter
from data.dataset import NYUDataset
from util.handdetector import HandDetector
from util.handpose_evaluation import NYUHandposeEvaluation
from data.transformations import transformPoints2D
from net.hiddenlayer import HiddenLayer, HiddenLayerParams
if __name__ == '__main__':
eval_prefix = 'NYU_EMB_t0nF8mp421fD553h1024_PCA30_AUGMENT'
if not os.path.exists('./eval/'+eval_prefix+'/'):
os.makedirs('./eval/'+eval_prefix+'/')
rng = numpy.random.RandomState(23455)
print("create data")
aug_modes = ['com', 'rot', 'none'] # 'sc',
comref = None # "./eval/NYU_COM_AUGMENT/net_NYU_COM_AUGMENT.pkl"
docom = False
di = NYUImporter('../data/NYU/', refineNet=comref)
Seq1 = di.loadSequence('train', shuffle=True, rng=rng, docom=docom)
trainSeqs = [Seq1]
Seq2_1 = di.loadSequence('test_1', docom=docom)
Seq2_2 = di.loadSequence('test_2', docom=docom)
testSeqs = [Seq2_1, Seq2_2]
# create training data
trainDataSet = NYUDataset(trainSeqs)
train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
train_data_cube = numpy.asarray([Seq1.config['cube']]*train_data.shape[0], dtype='float32')
train_data_com = numpy.asarray([d.com for d in Seq1.data], dtype='float32')
train_data_M = numpy.asarray([da.T for da in Seq1.data], dtype='float32')
train_gt3Dcrop = numpy.asarray([d.gt3Dcrop for d in Seq1.data], dtype='float32')
mb = (train_data.nbytes) / (1024 * 1024)
print("data size: {}Mb".format(mb))
valDataSet = NYUDataset(testSeqs)
val_data, val_gt3D = valDataSet.imgStackDepthOnly('test_1')
testDataSet = NYUDataset(testSeqs)
test_data1, test_gt3D1 = testDataSet.imgStackDepthOnly('test_1')
test_data2, test_gt3D2 = testDataSet.imgStackDepthOnly('test_2')
print train_gt3D.max(), test_gt3D1.max(), train_gt3D.min(), test_gt3D1.min()
print train_data.max(), test_data1.max(), train_data.min(), test_data1.min()
imgSizeW = train_data.shape[3]
imgSizeH = train_data.shape[2]
nChannels = train_data.shape[1]
####################################
# convert data to embedding
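    # A PCA pose prior is fitted on a large set of randomly augmented training
    # poses; the ground-truth joint positions are then projected into this
    # 30-dimensional embedding, which the network below is trained to regress.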
pca = PCA(n_components=30)
pca.fit(HandDetector.sampleRandomPoses(di, rng, train_gt3Dcrop, train_data_com, train_data_cube, 1e6,
aug_modes).reshape((-1, train_gt3D.shape[1]*3)))
train_gt3D_embed = pca.transform(train_gt3D.reshape((train_gt3D.shape[0], train_gt3D.shape[1]*3)))
test_gt3D_embed1 = pca.transform(test_gt3D1.reshape((test_gt3D1.shape[0], test_gt3D1.shape[1]*3)))
test_gt3D_embed2 = pca.transform(test_gt3D2.reshape((test_gt3D2.shape[0], test_gt3D2.shape[1]*3)))
val_gt3D_embed = pca.transform(val_gt3D.reshape((val_gt3D.shape[0], val_gt3D.shape[1]*3)))
############################################################################
print("create network")
batchSize = 128
poseNetParams = PoseRegNetParams(type=0, nChan=nChannels, wIn=imgSizeW, hIn=imgSizeH, batchSize=batchSize,
numJoints=1, nDims=train_gt3D_embed.shape[1])
poseNet = PoseRegNet(rng, cfgParams=poseNetParams)
poseNetTrainerParams = PoseRegNetTrainerParams()
poseNetTrainerParams.batch_size = batchSize
poseNetTrainerParams.learning_rate = 0.001
poseNetTrainerParams.weightreg_factor = 0.0
poseNetTrainerParams.force_macrobatch_reload = True
poseNetTrainerParams.para_augment = True
poseNetTrainerParams.augment_fun_params = {'fun': 'augment_poses', 'args': {'normZeroOne': False,
'di': di,
'aug_modes': aug_modes,
'hd': HandDetector(train_data[0, 0].copy(), abs(di.fx), abs(di.fy), importer=di),
'proj': pca}}
print("setup trainer")
poseNetTrainer = PoseRegNetTrainer(poseNet, poseNetTrainerParams, rng, './eval/'+eval_prefix)
poseNetTrainer.setData(train_data, train_gt3D_embed, val_data, val_gt3D_embed)
poseNetTrainer.addStaticData({'val_data_y3D': val_gt3D})
poseNetTrainer.addStaticData({'pca_data': pca.components_, 'mean_data': pca.mean_})
poseNetTrainer.addManagedData({'train_data_cube': train_data_cube,
'train_data_com': train_data_com,
'train_data_M': train_data_M,
'train_gt3Dcrop': train_gt3Dcrop})
poseNetTrainer.compileFunctions(compileDebugFcts=False)
###################################################################
# TRAIN
train_res = poseNetTrainer.train(n_epochs=100)
train_costs = train_res[0]
val_errs = train_res[2]
###################################################################
# TEST
# plot cost
fig = plt.figure()
plt.semilogy(train_costs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_cost.png')
fig = plt.figure()
plt.plot(numpy.asarray(val_errs).T)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')
# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# add prior to network
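    # The appended linear layer implements the PCA back-projection from the
    # embedding to 3D joint coordinates: its weights are set to the PCA
    # components and its bias to the PCA mean, so the network output is the
    # full set of (crop-normalized) joint positions.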
cfg = HiddenLayerParams(inputDim=(batchSize, train_gt3D_embed.shape[1]),
outputDim=(batchSize, numpy.prod(train_gt3D.shape[1:])), activation=None)
pcalayer = HiddenLayer(rng, poseNet.layers[-1].output, cfg, layerNum=len(poseNet.layers))
pcalayer.W.set_value(pca.components_)
pcalayer.b.set_value(pca.mean_)
poseNet.layers.append(pcalayer)
poseNet.output = pcalayer.output
poseNet.cfgParams.numJoints = train_gt3D.shape[1]
poseNet.cfgParams.nDims = train_gt3D.shape[2]
poseNet.cfgParams.outputDim = pcalayer.cfgParams.outputDim
poseNet.save("./eval/{}/network_prior.pkl".format(eval_prefix))
###################################################################
print("Testing ...")
gt3D = []
joints = []
for seq in testSeqs:
gt3D.extend([j.gt3Dorig for j in seq.data])
test_data, _ = testDataSet.imgStackDepthOnly(seq.name)
jts_embed = poseNet.computeOutput(test_data)
# Backtransform from embedding
# jts = pca.inverse_transform(jts_embed)
jts = jts_embed
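        # The appended PCA layer already maps the embedding back to 3D joint
        # coordinates, so no explicit pca.inverse_transform is needed here.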
for i in xrange(test_data.shape[0]):
joints.append(jts[i].reshape((-1, 3))*(seq.config['cube'][2]/2.) + seq.data[i].com)
joints = numpy.array(joints)
hpe = NYUHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
print("Train samples: {}, test samples: {}".format(train_data.shape[0], len(gt3D)))
print("Mean error: {}mm, max error: {}mm".format(hpe.getMeanError(), hpe.getMaxError()))
print("{}".format([hpe.getJointMeanError(j) for j in range(joints[0].shape[0])]))
print("{}".format([hpe.getJointMaxError(j) for j in range(joints[0].shape[0])]))
# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix), "wb"), protocol=cPickle.HIGHEST_PROTOCOL)
print "Testing baseline"
#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/NYU/test/test_predictions.mat', numpy.asarray(gt3D))
hpe_base = NYUHandposeEvaluation(gt3D, data_baseline)
hpe_base.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm".format(hpe_base.getMeanError()))
hpe.plotEvaluation(eval_prefix, methodName='Our regr', baseline=[('Tompson et al.', hpe_base)])
ind = 0
for i in testSeqs[0].data:
if ind % 20 != 0:
ind += 1
continue
jtI = transformPoints2D(di.joints3DToImg(joints[ind]), i.T)
hpe.plotResult(i.dpt, i.gtcrop, jtI, "{}_{}".format(eval_prefix, ind))
ind += 1
| gpl-3.0 |
btabibian/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 23 | 3957 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_estimate_bandwidth_with_sparse_matrix():
# Test estimate_bandwidth with sparse matrix
X = sparse.lil_matrix((1000, 1000))
msg = "A sparse matrix was passed, but dense data is required."
assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
mhue/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
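    Examples
    --------
    A minimal usage sketch (illustrative only; the predicted values depend on
    the estimator and data, so only the output shape is checked here):
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X, y = diabetes.data[:150], diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> cross_val_predict(lasso, X, y, cv=3).shape
    (150,)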
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
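    Examples
    --------
    A minimal usage sketch (illustrative only; the actual score values depend
    on the data and estimator, so only the shape of the result is checked):
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X, y = diabetes.data[:150], diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> cross_val_score(lasso, X, y, cv=3).shape
    (3,)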
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
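# Illustrative sketch (not part of the original module): how check_cv chooses a
# splitter. The toy data below are assumptions for the example only; check_cv,
# KFold and StratifiedKFold are the objects defined or imported above.
def _example_check_cv():
    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)
    # An integer plus classifier=True and a binary/multiclass y gives a StratifiedKFold
    cv_clf = check_cv(3, X, y, classifier=True)
    # classifier=False falls back to a plain KFold over the samples
    cv_reg = check_cv(3, X, y, classifier=False)
    return cv_clf, cv_reg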
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
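# Illustrative sketch (not part of the original module): a typical call to
# permutation_test_score. The dataset and estimator below are assumptions for the
# example only; a small p-value suggests the score is unlikely under shuffled labels.
def _example_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=30, random_state=0)
    return score, perm_scores, pvalue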
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
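# Illustrative sketch (not part of the original module): stratified splitting with
# train_test_split. The toy data below are assumptions for the example only;
# stratify=y preserves the 70/30 class balance in both subsets.
def _example_stratified_split():
    X = np.arange(20).reshape(10, 2)
    y = np.array([0] * 7 + [1] * 3)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, stratify=y, random_state=0)
    return X_train, X_test, y_train, y_test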
| bsd-3-clause |
agbs2k8/toolbelt_dev | toolbelt/trees/trees2.py | 1 | 24528 | import copy
import json
import hashlib
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
class Tree:
"""
    A Tree is a set of nodes that have a single, common trunk/origin/starting node from which all others branch out
It can contain only 1 trunk/origin node, but n number of leafs on k branches
"""
def __init__(self, tree_id=None, *args, **kwargs):
self._tree_id = tree_id
self._nodes = dict() # {node_id: level }
self._starting_node = None
self._max_width = None
self._max_depth = None
def __repr__(self):
return f"<Instance of Tree with ID:{self._tree_id}>"
def __str__(self):
return f"Tree ID '{self._tree_id}' with {len(self._nodes.keys())} Nodes."
# @classmethod
# def from_file(cls, filename):
# with open(filename) as f:
# first, last, pay = json.load(f)
# return cls(first, last, pay)
@classmethod
def read_json(cls, filepath=None, json_str=None, data_dict=None):
if filepath:
data = json.load(open(filepath))
elif json_str:
data = json.loads(json_str)
        elif isinstance(data_dict, dict):
            data = data_dict
        else:
            raise ValueError('No valid data provided.')
        new_tree = cls(tree_id=data['tree_id'])
        num_layers = len(data['nodes'].keys())
        for layer in range(num_layers):
            layer_data = data['nodes'][str(layer)]
            for _, node in layer_data.items():
                parent_id = None
                if 'parent' in node.keys():
                    parent_id = node['parent']
                new_tree.append_node(node_id=node['node_id'],
                                     name=node['name'],
                                     parent_id=parent_id)
        return new_tree
@staticmethod
def make_id():
return hashlib.md5(str(np.random.rand()).encode('utf8')).hexdigest()[:10]
@property
def tree_id(self):
return self._tree_id
@tree_id.setter
def tree_id(self, value):
if value is not None:
self._tree_id = str(value)
else:
self._tree_id = self.make_id()
@property
def nodes(self):
return self._nodes
@nodes.setter
def nodes(self, value):
self._nodes = value
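    @property
    def starting_node(self):
        # Added accessor (not in the original class definition): several methods
        # below refer to `tree.starting_node`, so expose the cached value found by
        # check_tree(), computing it first if necessary.
        if self._starting_node is None and self._nodes:
            self.check_tree()
        return self._starting_node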
def to_list(self):
"""
        Condense the tree into a nested list for easy visualization. NOT HASHABLE because there is no control over the order
:return: nested list of all nodes in the tree
"""
if self._starting_node:
return self._starting_node.get_branch_to_leaf()
else:
self.check_tree()
if self._starting_node:
return self._starting_node.get_branch_to_leaf()
else:
raise ValueError('Cannot find starting node; check that nodes exists.')
def print_tree(self, return_string=False):
self.check_tree()
result = self._starting_node.print_branch()
if return_string:
return result
else:
print(result)
def get_layers(self):
num_layers = self.get_depth()
layers = [[self._starting_node]]
for i in range(num_layers-1):
new_layer = []
for node in layers[i]:
new_layer += node.get_children()
layers.append(new_layer)
return layers
def to_dict(self):
ret_val = {'tree_id': self.tree_id}
layers = self.get_layers()
layers_dict = dict()
for layer_idx, layer in enumerate(layers):
layer_dict = dict()
for node_idx, node in enumerate(layer):
layer_dict[node_idx] = node.to_dict()
layers_dict[layer_idx] = layer_dict
ret_val['nodes'] = layers_dict
return ret_val
def to_json(self, filepath=None, indent=4):
if not filepath:
return json.dumps(self.to_dict(), indent=indent)
else:
with open(filepath, 'w') as f:
json.dump(self.to_dict(), f, indent=indent)
def find_node(self, node_id):
"""
Lookup Node object by the node_id from the parent's dictionary
        :param node_id: the id of the node to look up
:return: Node
"""
return self.nodes[node_id]
def append_node(self, node_id, name, parent_id=None, ignore_structure=False):
"""
Adds a new node at end of a branch; assumes it is the trunk/origin node or already has a parent
:param node_id: id of new node
:param name: name of new node
:param parent_id: id of the preceding node in the branch - if none it must be the trunk/origin node
:param ignore_structure: False to skip error checking - can cause errors later
:return: Nothing
"""
# make sure the node isn't in the dict of nodes, and add it
if node_id not in self.nodes.keys():
new_node = Node(node_id=node_id, name=name, tree=self)
self.nodes[node_id] = new_node
else: # if the node already exists, raise an error
raise ValueError('That node id already exists.')
# if they passed a id for a parent, try looking it up and adding it
if parent_id and parent_id in self.nodes.keys():
            new_node.parent = parent_id
# If the parent node_id is invalid
elif parent_id and parent_id not in self.nodes.keys():
raise ValueError('The designated parent ID does not exist.')
# Make sure that the node we added did not break the tree structure into multiple trees
if not ignore_structure and len(self.nodes.keys()) > 1:
self.check_tree()
def push_node(self, node_id, name, children_ids=(), ignore_structure=False):
"""
        Add a new parent node to the structure. The new node needs to take the current trunk/origin node as a child,
        otherwise it will break the tree structure and trip an error (unless you force it to ignore that)
:param node_id: id of new node
:param name: name of new node
:param children_ids: ids of any child node(s) already in the tree
:param ignore_structure: False to skip the error checking - can cause failures later
:return: Nothing
"""
if node_id not in self.nodes.keys():
new_node = Node(node_id=node_id, name=name, tree=self)
self.nodes[node_id] = new_node
else:
raise ValueError('That node already exists')
if len(children_ids) > 0:
for child_id in children_ids:
                new_node.add_child(child_id)
# Make sure that the node we added did not break the tree structure into multiple trees
if not ignore_structure and len(self.nodes.keys()) > 1:
self.check_tree()
def delete_node(self, node):
if not isinstance(node, Node) and isinstance(node, str):
node = self.find_node(node)
parent = node.get_parent()
if parent:
parent.children.remove(node)
if len(node.get_children()) > 0:
for child in node.iter_child():
child.set_parent(None)
self.nodes = {_id: _node for _id, _node in self.nodes.items() if _id != node.node_id}
self.check_tree()
def check_tree(self):
"""
Ensure that we still have a valid tree structure with only 1 trunk/origin node shared by all other nodes
Also, set the tree's starting_node value, once validated, in case it is not yet defined or has changed
"""
trunk = None
is_valid = False
# catch any issues if this is called when the tree is empty or a single node
if len(self.nodes.keys()) == 0:
raise KeyError('The tree is empty.')
elif len(self.nodes.keys()) == 1:
self._starting_node = self.nodes[list(self.nodes.keys())[0]]
return
# assuming there is more than a single node, make sure it is a single tree
for _, node in self.nodes.items():
if not trunk:
trunk = node.get_trunk_node()
else:
is_valid = (trunk == node.get_trunk_node())
if not is_valid:
raise ValueError('Incorrect tree structure.')
self._starting_node = trunk
def get_leafs(self):
"""
Find all leaf nodes = node objects that have no children
:return: list of leaf node objects
"""
return [node for _, node in self.nodes.items() if len(node.get_children()) < 1]
def get_node_list(self):
"""
Get the list of nodes out of the dict that contains them
:return: a list of all node objects
"""
return [node for _, node in self.nodes.items()]
def get_edge_list(self):
"""
For building the Graph... node_id -> node_id mapped /directional edges
:return: list of tuples of (from, to) node_id of nodes in tree
"""
edges = []
for node in self.get_node_list():
if node.get_parent():
edges.append((node.get_parent().node_id, node.node_id))
return edges
def get_depth(self):
"""
Find the distance from the starting_node to the furthest leaf node. For building visualizations so we know
        how many layers are needed. Also sets the variable for the tree's starting node if not already identified
:return: integer depth from trunk/origin to furthest leaf
"""
self.check_tree()
if not self._starting_node:
raise KeyError('Starting node not found.')
self._max_depth = self._starting_node.subtree_depth()
return self._max_depth
def get_width(self):
        self._max_width = len(self.get_leafs())
return self._max_width
def to_graph(self):
"""
Create NetworkX Directed Graph of the tree. Nodes tracked by node_id
:return: NetworkX DiGraph obj
"""
g = nx.DiGraph()
g.add_nodes_from([node.node_id for node in self.get_node_list()])
g.add_edges_from(self.get_edge_list())
return g
def make_layout(self, horizontal=True):
"""
Map all tree nodes to (x,y) coordinates where x & y are each in range [0,1] so they can be plotted
:param horizontal: by default it plots left/right, if False it flips to plot up/down
:return: dict of {node_id: [x,y], ...}
"""
self._max_depth = self.get_depth()
leafs = self.get_leafs()
self._max_width = len(leafs)
x_options = np.linspace(0, 1, self._max_depth)
y_options = np.linspace(0, 1, self._max_width)
pos = {self._starting_node.node_id: [x_options[0], None]}
layers = [[self._starting_node]]
for i in range(self._max_depth - 1):
next_layer = []
for node in layers[i]:
next_layer += node.get_children()
for node in next_layer:
pos[node.node_id] = [x_options[i + 1], None]
layers.append(next_layer)
for i, leaf in enumerate(leafs):
pos[leaf.node_id][1] = y_options[i]
parent = leaf.get_parent()
while parent:
pos[parent.node_id][1] = y_options[i]
parent = parent.get_parent()
if horizontal:
return {key: np.array(val, dtype=float) for key, val in pos.items()}
else:
return {key: np.array([1, 0])-np.array(val[::-1], dtype=float) for key, val in pos.items()}
def label_dict(self):
"""
For making plots - maps node_id to node name
:return: dict {node_id: name, ...}
"""
return {node_id: node.name for node_id, node in self.nodes.items()}
@staticmethod
def fix_plot_ratios(max_x, max_y):
ratio = max_x / max_y
if ratio < 1:
return max_x, max_x
elif ratio > (5/3):
return max_x, (3/5)*max_x
else:
return max_x, max_y
def plot(self, figsize=None, save_path=None, horizontal=True):
"""
Create Matplotlib Figure of the graph
:param figsize: (x, y) values of the size of the plot. By default is set based on the height/width of graph
:param save_path: if a path is provided, the graph will be saved to disk
:param horizontal: by default it plots left/right, if False it flips to plot up/down
:return: matplotlib plot object
"""
g = self.to_graph()
if horizontal:
pos = self.make_layout()
if not figsize:
max_x, max_y = self.fix_plot_ratios(self._max_depth * 4, self._max_width * 4)
else:
max_x, max_y = figsize
else:
pos = self.make_layout(horizontal=False)
if not figsize:
max_y, max_x = self.fix_plot_ratios(self._max_depth * 4, self._max_width * 4)
else:
max_x, max_y = figsize
font_size = int(max_x)
node_size = max_x * ((2 / 3) * 1000)
fig, ax = plt.subplots(figsize=(max_x, max_y))
nx.draw_networkx_nodes(g, pos,
node_color='lightgray',
node_size=node_size)
nx.draw_networkx_edges(g, pos,
node_size=node_size,
arrowsize=max_x)
nx.draw_networkx_labels(g, pos,
self.label_dict(),
font_size=font_size)
ax.axis('off')
if save_path:
plt.savefig(save_path)
return ax
def matches(self, other):
"""
Compares two trees (this one to another) and makes sure that they have 1 common starting process (by name)
*not by ID*, makes sure they have the exact same set of leafs, and then compares all branches between the
leafs and the trunk to make sure they match
:param other: another tree to compare to
:return: True if the match, False if they are different
"""
# 0. Make sure we have a starting node:
self.check_tree()
if not self._starting_node:
return False
# 1. Make sure the trunk/origin process is the same for both
if self._starting_node.name != other.starting_node.name:
return False
# ...Now we compare the leafs...
my_leafs = self.get_leafs()
ot_leafs = other.get_leafs()
# 2. Make sure the set of leafs processes are the same
if set([leaf.name for leaf in my_leafs]) != set([leaf.name for leaf in ot_leafs]):
return False
# 3. compare the contents of each branch and make sure there is a matching one in each
my_branches = [[node.name for node in leaf.get_branch_to_trunk()] for leaf in my_leafs]
ot_branches = [[node.name for node in leaf.get_branch_to_trunk()] for leaf in ot_leafs]
while len(my_branches) > 0:
cur_branch = my_branches.pop()
if cur_branch in ot_branches:
ot_branches.remove(cur_branch)
else:
return False
        # ... make sure there wasn't anything else left in the other one.
if len(ot_branches) == 0:
return True
else:
return False
def subtree(self, starting_node):
"""
Returns a duplicated tree of all branches from the given node
:param starting_node: either Node OBJ or node_id to look up and retrieve the node
:return: Tree
"""
if isinstance(starting_node, str):
starting_node = self.find_node(starting_node)
new_tree = Tree()
starting_node.copy_to(new_tree, with_parent=False)
children = starting_node.get_children()
for _ in range(starting_node.subtree_depth()-1):
next_layer = []
for child in children:
child.copy_to(new_tree)
next_layer += child.get_children()
children = next_layer
return new_tree
def has_subtree(self, other):
first_proc_name = other.starting_node.name
prospects = [sub_start_node for _, sub_start_node in self.nodes.items() if
sub_start_node.name == first_proc_name]
search_depth = other.get_depth()
# The other cannot be larger than I am
if self.get_depth() < search_depth:
return False
# Make sure the passed set is a subset of what I already have
elif not set([node.name for _, node in other.nodes.items()]).issubset(
set([node.name for _, node in self.nodes.items()])):
return False
# Make sure I have some processes that match the starting process name
elif len(prospects) < 1:
return False
# Now go through all of the prospective starting points...
else:
for prospect in prospects:
prospect_matched = True
pr_children = [x for x in prospect.get_children()]
ot_children = [x for x in other.starting_node.get_children()]
for i in range(search_depth - 1):
# fill these for the next iteration as we go
pr_next_layer = []
ot_next_layer = []
for ot_child in ot_children:
ot_next_layer += ot_child.get_children()
child_matched = False
for pr_child in pr_children:
if ot_child.name == pr_child.name and ot_child.parent.name == pr_child.parent.name:
pr_children.remove(pr_child)
pr_next_layer += pr_child.get_children()
child_matched = True
if not child_matched:
prospect_matched = False
pr_children = pr_next_layer
ot_children = ot_next_layer
if prospect_matched:
return True
return False
def is_subtree(self, other):
return other.has_subtree(self)
class Node:
"""
A node is a single step in the tree. It can be mid branch, a split in a branch, or a leaf
It can have only 1 parent (not required if it is the trunk/origin/start) but n children.
There can be nodes with duplicate names which are equal to one another but have different IDs
"""
def __init__(self, node_id, name, tree):
self._node_id = node_id
self._name = name
self._tree = tree
self._parent = None
self._children = []
def __repr__(self):
return f"<Instance of Node with ID:{self.node_id}>"
def __str__(self):
return f"Node ID: '{self.node_id}', Name: '{self.name}' on TreeID: '{self.tree.tree_id}'."
def __eq__(self, other):
if self.name == other.name:
return True
else:
return False
def __hash__(self):
return hash(self.name)
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, parent_):
if isinstance(parent_, Node):
self._parent = parent_
else:
self._parent = self.tree.find_node(parent_)
self._parent.add_child(self)
@property
def children(self):
return self._children
@children.setter
def children(self, values):
if isinstance(values, list) and isinstance(values[0], Node):
self._children = values
elif isinstance(values, list) and not isinstance(values[0], Node):
for child in values:
self.add_child(child)
else:
self.add_child(values)
    def add_child(self, child):
        if not isinstance(child, Node):
            # look the node up by id if we were not handed a Node object
            child = self.tree.find_node(child)
        self.children.append(child)
        if child.parent is None:
            # set the backing attribute directly to avoid re-triggering the parent setter
            child._parent = self
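    # Thin accessors (added; not in the original class definition) so that the
    # method-style calls used by the Tree methods above, namely get_parent(),
    # get_children(), iter_child() and set_parent(), resolve against this
    # property-based Node implementation.
    def get_parent(self):
        return self._parent
    def set_parent(self, parent_):
        self._parent = parent_
    def get_children(self):
        return self._children
    def iter_child(self):
        for child in self._children:
            yield child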
@property
def node_id(self):
return self._node_id
@node_id.setter
def node_id(self, value):
self._node_id = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def tree(self):
return self._tree
@tree.setter
def tree(self, value):
self._tree = value
def copy(self):
return copy.deepcopy(self)
def copy_to(self, new_tree, with_parent=True):
if with_parent:
new_tree.append_node(node_id=self.node_id, name=self.name, parent_id=self.parent.node_id)
else:
new_tree.append_node(node_id=self.node_id, name=self.name, parent_id=None)
def to_dict(self):
ret_val = {'node_id': self.node_id, 'name': self.name, 'tree_id': self.tree.tree_id}
if self.parent:
ret_val['parent'] = self.parent.node_id
if len(self.children) > 0:
child_dict = dict()
for idx, child in enumerate(self.children):
child_dict[idx] = child.node_id
ret_val['children'] = child_dict
return ret_val
def get_trunk_node(self):
"""
Recursively find the origin/parent of the tree
:return: trunk/origin/starting node
"""
parent = self.parent
if parent:
return parent.get_trunk_node()
else:
return self
def get_last_leaf(self):
"""
Recursively find the last leaf node of the sub-tree sprouting from this node
:return: leaf furthest from the trunk/origin
"""
children = self.children
if children:
for child in children:
return child.get_last_leaf()
else:
return self
def get_branch_to_leaf(self):
"""
Node method for creating the "to_list" tree method. Recursively search to all leafs out from the node
:return: list of subsequent node branches
"""
if len(self.children) > 0:
return [self.name, [child.get_branch_to_leaf() for child in self.children]]
else:
return [self.name]
def get_branch_to_trunk(self):
"""
Create list of nodes from a given node back to the trunk/origin
:return:
"""
branch = [self]
parent = self.parent
while parent and parent != self.tree.starting_node:
branch.append(parent)
parent = parent.get_parent()
if self.node_id != self.tree.starting_node.node_id:
branch.append(self.tree.starting_node)
return branch
def same_children(self, other) -> bool:
"""
Make sure that two nodes have the same children nodes by name
:param other: another node
:return: True if the children names match
"""
if len(self.children) > 0 and len(self.children) == len(other.children):
return set([child.name for child in self.children]) == set(
[other.name for other in other.iter_child()])
        elif len(self.children) == 0 and len(other.children) == 0:
return True
else:
return False
def subtree_depth(self):
"""
Recursively find the max depth of the last leaf node branched out from our node
:return: max depth from the selected node
"""
if self is None:
return 0
else:
max_depth = 0
for child in self.children:
depth = child.subtree_depth()
if depth > max_depth:
max_depth = depth
return max_depth + 1
def print_branch(self):
depth = 4*(len(self.get_branch_to_trunk())-1)
if self.parent and len(self.children) > 0:
return (" "*(depth-4))+'└── '+self.name+"\n"+"".join([child.print_branch() for child in self.children])
elif len(self.children) > 0:
return (" "*(depth-4))+self.name+"\n"+"".join([child.print_branch() for child in self.children])
else:
return (" "*(depth-4))+'└── '+self.name+"\n"
| mit |
kazukisona/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG§ion=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
"""Represents a survival function."""
def __init__(self, cdf, label=''):
self.cdf = cdf
self.label = label or cdf.label
@property
def ts(self):
return self.cdf.xs
@property
def ss(self):
return 1 - self.cdf.ps
def __getitem__(self, t):
return self.Prob(t)
def Prob(self, t):
"""Returns S(t), the probability that corresponds to value t.
t: time
returns: float probability
"""
return 1 - self.cdf.Prob(t)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Mean(self):
"""Mean survival time."""
return self.cdf.Mean()
def Items(self):
"""Sorted list of (t, s) pairs."""
return zip(self.ts, self.ss)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, survival function)
"""
return self.ts, self.ss
def MakeHazard(self, label=''):
"""Computes the hazard function.
sf: survival function
returns: Pmf that maps times to hazard rates
"""
ss = self.ss
lams = {}
for i, t in enumerate(self.ts[:-1]):
hazard = (ss[i] - ss[i+1]) / ss[i]
lams[t] = hazard
return HazardFunction(lams, label=label)
def MakePmf(self, filler=None):
"""Makes a PMF of lifetimes.
filler: value to replace missing values
returns: Pmf
"""
pmf = thinkstats2.Pmf()
for val, prob in self.cdf.Items():
pmf.Set(val, prob)
cutoff = self.cdf.ps[-1]
if filler is not None:
pmf[filler] = 1-cutoff
return pmf
def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
"""Computes remaining lifetime as a function of age.
        func: function from conditional Pmf to expected lifetime
returns: Series that maps from age to remaining lifetime
"""
pmf = self.MakePmf(filler=filler)
d = {}
for t in sorted(pmf.Values())[:-1]:
pmf[t] = 0
pmf.Normalize()
d[t] = func(pmf) - t
#print(t, d[t])
return pandas.Series(d)
class HazardFunction(object):
"""Represents a hazard function."""
def __init__(self, d, label=''):
"""Initialize the hazard function.
d: dictionary (or anything that can initialize a series)
label: string
"""
self.series = pandas.Series(d)
self.label = label
def __getitem__(self, t):
return self.series[t]
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, hazard function)
"""
return self.series.index, self.series.values
def MakeSurvival(self, label=''):
"""Makes the survival function.
returns: SurvivalFunction
"""
ts = self.series.index
ss = (1 - self.series).cumprod()
cdf = thinkstats2.Cdf(ts, 1-ss)
sf = SurvivalFunction(cdf, label=label)
return sf
def Extend(self, other):
"""Extends this hazard function by copying the tail from another.
other: HazardFunction
"""
last = self.series.index[-1]
more = other.series[other.series.index > last]
self.series = pandas.concat([self.series, more])
def ConditionalSurvival(pmf, t0):
"""Computes conditional survival function.
Probability that duration exceeds t0+t, given that
duration >= t0.
pmf: Pmf of durations
t0: minimum time
returns: tuple of (ts, conditional survivals)
"""
cond = thinkstats2.Pmf()
for t, p in pmf.Items():
if t >= t0:
cond.Set(t-t0, p)
return SurvivalFunction(thinkstats2.Cdf(cond))
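# Illustrative sketch (not part of the original module): conditional survival for a
# made-up Pmf of durations; the value returned is P(duration > 2+1 | duration >= 2).
def _example_conditional_survival():
    pmf = thinkstats2.Pmf([1, 2, 2, 3, 5, 8, 13])
    sf_given_2 = ConditionalSurvival(pmf, 2)
    return sf_given_2.Prob(1)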
def PlotConditionalSurvival(durations):
"""Plots conditional survival curves for a range of t0.
durations: list of durations
"""
pmf = thinkstats2.Pmf(durations)
times = [8, 16, 24, 32]
thinkplot.PrePlot(len(times))
for t0 in times:
sf = ConditionalSurvival(pmf, t0)
label = 't0=%d' % t0
thinkplot.Plot(sf, label=label)
thinkplot.Show()
def PlotSurvival(complete):
"""Plots survival and hazard curves.
complete: list of complete lifetimes
"""
thinkplot.PrePlot(3, rows=2)
cdf = thinkstats2.Cdf(complete, label='cdf')
sf = SurvivalFunction(cdf, label='survival')
print(cdf[13])
print(sf[13])
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config()
thinkplot.SubPlot(2)
hf = sf.MakeHazard(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
"""Plots the hazard function and survival function.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
"""
# plot S(t) based on only complete pregnancies
cdf = thinkstats2.Cdf(complete)
sf = SurvivalFunction(cdf)
thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
thinkplot.PrePlot(2)
# plot the hazard function
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
# plot the survival function
sf = hf.MakeSurvival()
thinkplot.Plot(sf, label='S(t)')
thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
shift: presumed additional survival of ongoing
"""
# pmf and sf of complete lifetimes
n = len(complete)
hist_complete = thinkstats2.Hist(complete)
sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
# sf for ongoing lifetimes
# The shift is a regrettable hack needed to deal with simultaneity.
# If a case is complete at some t and another case is ongoing
# at t, we presume that the ongoing case exceeds t+shift.
m = len(ongoing)
cdf = thinkstats2.Cdf(ongoing).Shift(shift)
sf_ongoing = SurvivalFunction(cdf)
lams = {}
for t, ended in sorted(hist_complete.Items()):
at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
lams[t] = ended / at_risk
#print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
return HazardFunction(lams, label=label)
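# Illustrative sketch (not part of the original module): Kaplan-Meier estimation on
# made-up complete and ongoing (censored) lifetimes, followed by conversion of the
# hazard function into a survival curve.
def _example_kaplan_meier():
    complete = [1, 2, 2, 3, 4, 4, 5]    # lifetimes observed to completion
    ongoing = [2, 3, 5, 6]              # lifetimes still in progress at observation
    hf = EstimateHazardFunction(complete, ongoing, label='example')
    sf = hf.MakeSurvival(label='example')
    return hf, sf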
def CleanData(resp):
"""Cleans a respondent DataFrame.
resp: DataFrame of respondents
"""
resp.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
"""Draws fake points in order to add labels to the legend.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, _ in groups:
label = '%d0s' % name
thinkplot.Plot([15], [1], label=label, **options)
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for _, group in groups:
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for _, group in groups:
hf, sf = EstimateSurvival(group)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for i, hf in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i-1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
"""Resamples respondents and estimates the survival function.
resp: DataFrame of respondents
iters: number of resamples
"""
_, sf = EstimateSurvival(resp)
thinkplot.Plot(sf)
low, high = resp.agemarry.min(), resp.agemarry.max()
ts = np.arange(low, high, 1/12.0)
ss_seq = []
for _ in range(iters):
sample = thinkstats2.ResampleRowsWeighted(resp)
_, sf = EstimateSurvival(sample)
ss_seq.append(sf.Probs(ts))
low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
thinkplot.Save(root='survival3',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[12, 46],
ylim=[0, 1],
formats=FORMATS)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
hf, sf = EstimateSurvival(resp)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2',
xlabel='age (years)',
ylabel='prob unmarried',
ylim=[0, 1],
legend=False,
formats=FORMATS)
return sf
def PlotPregnancyData(preg):
"""Plots survival and hazard curves based on pregnancy lengths.
    preg: DataFrame of pregnancies
"""
complete = preg.query('outcome in [1, 3, 4]').prglngth
print('Number of complete pregnancies', len(complete))
ongoing = preg[preg.outcome == 6].prglngth
print('Number of ongoing pregnancies', len(ongoing))
PlotSurvival(complete)
thinkplot.Save(root='survival1',
xlabel='t (weeks)',
formats=FORMATS)
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return sf
def PlotRemainingLifetime(sf1, sf2):
"""Plots remaining lifetimes for pregnancy and age at first marriage.
sf1: SurvivalFunction for pregnancy length
sf2: SurvivalFunction for age at first marriage
"""
thinkplot.PrePlot(cols=2)
rem_life1 = sf1.RemainingLifetime()
thinkplot.Plot(rem_life1)
thinkplot.Config(title='pregnancy length',
xlabel='weeks',
ylabel='mean remaining weeks')
thinkplot.SubPlot(2)
func = lambda pmf: pmf.Percentile(50)
rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
thinkplot.Plot(rem_life2)
thinkplot.Config(title='age at first marriage',
ylim=[0, 15],
xlim=[11, 31],
xlabel='age (years)',
ylabel='median remaining years')
thinkplot.Save(root='survival6',
formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
**options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
CleanData(df)
return df
def ReadFemResp2002():
"""Reads respondent data from NSFG Cycle 6.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'finalwgt']
resp = ReadFemResp(usecols=usecols)
CleanData(resp)
return resp
def ReadFemResp2010():
"""Reads respondent data from NSFG Cycle 7.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgtq1q16']
resp = ReadFemResp('2006_2010_FemRespSetup.dct',
'2006_2010_FemResp.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgtq1q16
CleanData(resp)
return resp
def ReadFemResp2013():
"""Reads respondent data from NSFG Cycle 8.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgt2011_2013']
resp = ReadFemResp('2011_2013_FemRespSetup.dct',
'2011_2013_FemRespData.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgt2011_2013
CleanData(resp)
return resp
def ReadFemResp1995():
"""Reads respondent data from NSFG Cycle 5.
returns: DataFrame
"""
dat_file = '1995FemRespData.dat.gz'
names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
colspecs = [(12359, 12363),
(3538, 3540),
(11758, 11762),
(13, 16),
(12349, 12359)]
df = pandas.read_fwf(dat_file,
compression='gzip',
colspecs=colspecs,
names=names)
df['cmmarrhx'] = df.mardat01
df['cmbirth'] = df.bdaycenm
df['cmintvw'] = df.a_doi
df['finalwgt'] = df.post_wt
df.timesmar.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.timesmar > 0).astype(int)
CleanData(df)
return df
def ReadFemResp1982():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1982NSFGData.dat.gz'
names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
#actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
colspecs = [(1028, 1031),
(1258, 1259),
(841, 844),
(12, 15),
(976, 982)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
df.MARNO.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.MARNO > 0).astype(int)
CleanData(df)
return df[:7969]
def ReadFemResp1988():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1988FemRespData.dat.gz'
names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
# colspecs = [(799, 803)],
colspecs = [(20, 22)]#,
# (1538, 1542),
# (26, 30),
# (2568, 2574)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
# df['cmmarrhx'] = df.F19M1MO
# df['cmbirth'] = df.A_3
# df['cmintvw'] = df.CMOIMO
# df['finalwgt'] = df.W5
df.F_13.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.F_13 > 0).astype(int)
# CleanData(df)
return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
"""Plots survival curves for resampled data.
resps: list of DataFrames
iters: number of resamples to plot
predict_flag: whether to also plot predictions
"""
for i in range(iters):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if omit:
groups = [(name, group) for name, group in groups
if name not in omit]
# TODO: refactor this to collect resampled estimates and
# plot shaded areas
if i == 0:
AddLabelsByDecade(groups, alpha=0.7)
if predict_flag:
PlotPredictionsByDecade(groups, alpha=0.1)
EstimateSurvivalByDecade(groups, alpha=0.1)
else:
EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
thinkstats2.RandomSeed(17)
preg = nsfg.ReadFemPreg()
sf1 = PlotPregnancyData(preg)
# make the plots based on Cycle 6
resp6 = ReadFemResp2002()
sf2 = PlotMarriageData(resp6)
ResampleSurvival(resp6)
PlotRemainingLifetime(sf1, sf2)
# read Cycles 5 and 7
resp5 = ReadFemResp1995()
resp7 = ReadFemResp2010()
# plot resampled survival functions by decade
resps = [resp5, resp6, resp7]
PlotResampledByDecade(resps)
thinkplot.Save(root='survival4',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
# plot resampled survival functions by decade, with predictions
PlotResampledByDecade(resps, predict_flag=True, omit=[5])
thinkplot.Save(root='survival5',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
if __name__ == '__main__':
main()
| gpl-3.0 |
tomevans/linvb | linvb/vbr_planet.py | 1 | 23844 | try:
from MyFunctions import EccLightCurveModels
except:
print '\nCould not import EccLightCurveModels.py from the MyFunctions module!\n'
try:
from planet import orbit
except:
print '\nCould not import orbit.py from the planet module!\n'
import matplotlib.pyplot as plt
import numpy as np
import pdb, sys
EPS_MIN = sys.float_info.min
#################################################################################
#
# This is a specialised module to allow planet-related basis functions, such as
# transits/eclipses, phase curves etc, to be appended to pre-normalised basis
# matrices.
#
#################################################################################
def append_transit_function( vbr_object, time, system_params, transit_type=None, transit_function='ma02' ):
"""
Appends a transit or eclipse basis function to the existing normalised training
basis matrix, and records the appended array, so that it can be automatically
appended to the normalised predictive basis matrix when that's constructed later.
The basis function is set to zero out-of-transit, and is negative within transits
and eclipses.
Because we want to vary their depths independently, we must construct separate
basis functions for the primary transits and secondary eclipses, which is what
the transit_type keyword argument allows. Specifically, it accepts the following
two options:
**'primary' - a basis function containing only primary transits, with zeros where
the secondary eclipses should occur.
**'secondary' - a basis function containing only secondary eclipses, with zeros
where the primary transits should occur.
    The content of the system_params input array depends on the type of limb darkening:
        [ T0, P, aRs, RpRs, b, c1, c2, ecc, omega, foot, Tgrad, sec_depth ] ... quadratic
        [ T0, P, aRs, RpRs, b, c1, c2, c3, c4, ecc, omega, foot, Tgrad, sec_depth ] ... 4-parameter nonlinear
    The length of the array (12 or 14 elements) is used to determine which type of limb
    darkening is to be used.
Note that b=a*cosi/Rs and omega is the argument of periastron, which equals 90degrees
for a circular orbit by convention, if T0 is to be the time of mid-transit. Do not
confuse omega here with the longitude of the ascending node Omega, which is set to
180degrees by convention.
    Also note that the value supplied for sec_depth only sets the assumed depth of the
    secondary eclipse. Because this depth is free to vary in the fitting, it acts as an
    arbitrary best-guess value if transit_type=='secondary', and the basis function is
    then standardised so that it has a comparable amplitude to all the other basis
    functions.
On a practical matter, if you only have the time of central transit or eclipse,
you must convert it to T0 before passing into this function. If the orbit is
circular, this can be done trivially by setting T0=Tmid and omega=np.pi/2. For a
non-circular orbit, it is less straightforward, but can be done using the get_t0()
function in the orbit.py module.
"""
print '\nAppending %s transit basis function...' % transit_type
if transit_function=='ma02':
transit_shape = ma02( time, system_params, transit_type=transit_type )
elif transit_function=='piecewise_tr':
transit_shape = piecewise_tr( time, system_params, transit_type )
# Ensure that the basis function is zero out of transit:
if vbr_object.target_log_units==False:
transit_basis = transit_shape-1
else:
transit_basis = np.log( transit_shape )
# Normalise the transit function to the same level as the target data;
# by doing this, we are implying that the parameters for the transit
# function represent our best guess; note that if we're attempting to
# detect a faint signal, it's best to overestimate its depth otherwise
# the VB algorithm is reluctant to move the transit depth away from
# zero even if there's a faint signal present:
transit_basis = transit_basis / vbr_object.target_train_norm_scaling
if transit_type=='primary':
appendage_name = 'Primary Transit'
elif transit_type=='secondary':
appendage_name = 'Secondary Eclipse'
vbr_object.append_training_basis_matrix( transit_basis, appendage_name=appendage_name )
# Record the column index containing the transit basis function:
if transit_type=='primary':
vbr_object.transit_basis_ix = vbr_object.phi_ixs_appendages_postnorm[-1]
elif transit_type=='secondary':
vbr_object.eclipse_basis_ix = vbr_object.phi_ixs_appendages_postnorm[-1]
return None
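# Illustrative sketch (not part of the original module): the intended call pattern for
# append_transit_function(), assuming `vbr` is an already-initialised linvb VB object
# whose normalised training basis matrix has been constructed; all parameter values
# below are arbitrary placeholders for a quadratic limb darkening parameterisation.
def _example_append_transit( vbr, time ):
    # [ T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega, foot, Tgrad, sec_depth ]
    system_params = [ 0.0, 3.5, 8.8, 0.1, 0.2, 0.3, 0.2, 0.0, np.pi/2., 1.0, 0.0, 5e-4 ]
    append_transit_function( vbr, time, system_params, transit_type='primary', \
                             transit_function='ma02' )
    append_transit_function( vbr, time, system_params, transit_type='secondary', \
                             transit_function='ma02' )
    return vbr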
def ma02( time, system_params, transit_type ):
"""
This evaluates transit and eclipse functions using the analytic formulas
of Mandel & Agol 2002. To do this, it uses the EccLightCurve_aRs() function
in the EccLightCurveModels.py module.
If transit_type=='primary', then a light curve with only the primary transits
will be returned. Alternatively, if transit_type=='secondary', a light curve
with only the secondary eclipses will be returned.
"""
# Work out if we're using quadratic or 4-parameter nonlinear limb darkening:
if len(system_params)==12:
T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega, foot, Tgrad, sec_depth = system_params
# Just in case, force the limb darkening to be zero if it's an eclipse:
if transit_type=='secondary':
c1_q = c2_q = 0.0
system_params_arr = np.array([ T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega, \
foot, Tgrad, sec_depth ])
ld_type = 'quad'
elif len(system_params)==14:
T0, P, aRs, RpRs, b, c1_nl, c2_nl, c3_nl, c4_nl, ecc, omega, foot, Tgrad, sec_depth = system_params
# Just in case, force the limb darkening to be zero if it's an eclipse:
if transit_type=='secondary':
c1_nl = c2_nl = c3_nl = c4_nl = 0.0
system_params_arr = np.array([ T0, P, aRs, RpRs, b, c1_nl, c2_nl, c3_nl, c4_nl, \
ecc, omega, foot, Tgrad, sec_depth ])
ld_type = 'nonlin'
else:
pdb.set_trace() #this shouldn't happen
# Evaluate the transit function:
transit_shape, ycoord = EccLightCurveModels.EccLightCurve_aRs( system_params_arr, time, \
return_y=True, ld=ld_type )
if transit_type=='primary':
transit_shape[ycoord>0] = 1 # i.e. when planet further away than star
elif transit_type=='secondary':
transit_shape[ycoord<=0] = 1 # i.e. when planet closer than star
return transit_shape
def piecewise_tr( time, system_params, transit_type ):
"""
This evaluates transit and eclipse functions using the piece-wise linear
approximation of Carter et al 2009. The ingress-egress times and transit
duration are approximated. Note of course that the flat-bottomed function
prevents a straightforward mapping of the fitted signal back to Rp/Rs etc
for a primary transit due to the degeneracy of limb darkening which is not
accounted for here at all.
If transit_type=='primary', then a light curve with only the primary transits
will be returned. Alternatively, if transit_type=='secondary', a light curve
with only the secondary eclipses will be returned.
"""
if (transit_type!='primary')*(transit_type!='secondary'):
pdb.set_trace()
# Unpack the system parameters, but be careful to account for the possibility that
# we could be using either a quadratic or 4-parameter nonlinear limb darkening law,
# as that will affect the number of system parameters that have been passed in:
if len(system_params)==12:
T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega, foot, Tgrad, sec_depth = system_params
elif len(system_params)==14:
T0, P, aRs, RpRs, b, c1_nl, c2_nl, c3_nl, c4_nl, ecc, omega, foot, Tgrad, sec_depth = system_params
else:
pdb.set_trace() #this shouldn't happen
# Calculate the true anomalies of the times of central transit and the times of
# central transit:
f_tr = - np.pi / 2.0-omega
f_ec = + np.pi / 2.0-omega
# Now we proceed to calculate the corresponding eccentric anomalies, using the
# standard relation between the true anomaly and the eccentric anomaly:
cosf_tr = np.cos( f_tr )
cosf_ec = np.cos( f_ec )
cosean_tr = ( ecc + cosf_tr ) / ( 1. + ecc*cosf_tr )
cosean_ec = ( ecc + cosf_ec ) / ( 1. + ecc*cosf_ec )
ean_tr = np.arccos( cosean_tr )
ean_ec = np.arccos( cosean_ec )
# Convert the eccentric anomaly to the mean anomaly using the simple formula:
man_tr = ean_tr - ecc*np.sin( ean_tr )
man_ec = ean_ec - ecc*np.sin( ean_ec )
# Given the definition of the mean anomaly as the fraction of the orbital period
# that has passed since periastron, calculate the time since periastron:
delt_tr = ( P*man_tr ) / ( 2*np.pi )
delt_ec = ( P*man_ec ) / ( 2*np.pi )
# Hence, calculate the times of transit and eclipse:
t_tr = T0+delt_tr
t_ec = T0+delt_ec
# Use Equations 6-10 of Carter et al 2008 to calculate the key parameters that
# define the transit shape:
n = 2.*np.pi/P
b0 = b * ( ( 1. - ecc**2. )/( 1. + ecc*np.sin( omega ) ) )
    tau0 = ( ( np.sqrt( 1. - ecc**2. ) )/( 1 + ecc*np.sin( omega ) ) ) / n / aRs
bigT = 2 * tau0 * np.sqrt( 1-b0**2. )
tau = 2. * tau0 * RpRs / np.sqrt( 1.-b0**2. )
# We have now calculated the parameters that define our approximated shape for
# the transits/eclipses. Next, we need to find the times of transit and eclipse
# that occured immediately before our time series (in case there is some overlap
# of a partially-complete transit right at the start of our time series, with the
# central time actually falling before the start):
if t_tr<time.min():
while t_tr<time.min():
t_tr += P
t_tr -= P
else:
while t_tr>time.min():
t_tr -= P
if t_ec<time.min():
while t_ec<time.min():
t_ec += P
t_ec -= P
else:
while t_ec>time.min():
t_ec -= P
# Build up a list of the transit and eclipse times until the one immediately
# after the end of our time series:
t_trs = [t_tr]
while t_tr<time.max():
t_tr += P
t_trs += [ t_tr ]
t_ecs = [ t_ec ]
while t_ec<time.max():
t_ec += P
t_ecs += [ t_ec ]
# Construct the approximated light curve one transit and one eclipse at a time:
f = np.ones( len( time ) )
tr_depth = RpRs**2.
if transit_type!='secondary':
for t_tr in t_trs:
# Full transit times:
ixs_23 = abs( time-t_tr ) < 0.5*bigT - 0.5*tau
f[ixs_23] = f[ixs_23] - tr_depth
# Ingress times:
t_1 = t_tr - 0.5*bigT - 0.5*tau
t_2 = t_tr - 0.5*bigT + 0.5*tau
ixs_12 = ( ( time>t_1 ) * ( time<t_2 ) )
f[ixs_12] = f[ixs_12] - tr_depth + ( tr_depth/tau )*( -time[ixs_12] + t_tr - 0.5*bigT + 0.5*tau )
# Egress times:
t_3 = t_tr + 0.5*bigT - 0.5*tau
t_4 = t_tr + 0.5*bigT + 0.5*tau
ixs_34 = ( ( time>t_3 ) * ( time<t_4 ) )
f[ixs_34] = f[ixs_34] - tr_depth + ( tr_depth/tau )*( +time[ixs_34] - t_tr - 0.5*bigT + 0.5*tau )
if transit_type!='primary':
for t_ec in t_ecs:
# Full transit times:
ixs_23 = abs( time-t_ec ) < 0.5*bigT - 0.5*tau
f[ixs_23] = f[ixs_23] - sec_depth
# Ingress times:
t_1 = t_ec - 0.5*bigT - 0.5*tau
t_2 = t_ec - 0.5*bigT + 0.5*tau
ixs_12 = ( ( time>t_1 ) * ( time<t_2 ) )
f[ixs_12] = f[ixs_12] - sec_depth + ( sec_depth/tau )*( -time[ixs_12] + t_ec - 0.5*bigT + 0.5*tau )
# Egress times:
t_3 = t_ec + 0.5*bigT - 0.5*tau
t_4 = t_ec + 0.5*bigT + 0.5*tau
ixs_34 = ( ( time>t_3 ) * ( time<t_4 ) )
f[ixs_34] = f[ixs_34] - sec_depth + ( sec_depth/tau )*( +time[ixs_34] - t_ec - 0.5*bigT + 0.5*tau )
return f
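# Illustrative sketch (not part of the original module): evaluating the piece-wise
# linear approximation above for a made-up circular-orbit system; all values are
# arbitrary and only meant to show the expected 12-element (quadratic limb darkening)
# input format.
def _example_piecewise_tr():
    time = np.linspace( 0.0, 10.0, 2000 )
    # [ T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega, foot, Tgrad, sec_depth ]
    system_params = [ 0.0, 3.5, 8.8, 0.1, 0.2, 0.0, 0.0, 0.0, np.pi/2., 1.0, 0.0, 5e-4 ]
    f_primary = piecewise_tr( time, system_params, 'primary' )
    f_secondary = piecewise_tr( time, system_params, 'secondary' )
    return f_primary, f_secondary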
def calc_transit_depth( vbr_object, transit_type='primary', print_precision=1e-6 ):
"""
Uses the posterior distribution over the transit/eclipse function to calculate the
inferred depth and associated uncertainty. The transit_function_ix specifies which
column in the basis matrix corresponds to the transit/eclipse basis function.
"""
# Find the in-transit data points:
if transit_type=='primary':
ix = vbr_object.transit_basis_ix
elif transit_type=='secondary':
ix = vbr_object.eclipse_basis_ix
# Extract the transit basis column from the normalised basis matrix, and unnormalise
# it, making sure to use the same factor as was used in append_transit_function():
transit_basis = vbr_object.phi_pred_norm[:,ix].flatten() * vbr_object.target_train_norm_scaling
intransit_ixs = np.arange( vbr_object.n_data_pred )[transit_basis!=0]
n_in = len(intransit_ixs)
if n_in>0:
# Treat the inferred distributions over the weights as scaled normal random variables,
# where the scaling factor is the value of the transit basis function at each of the
# in-transit points:
mu_x = ( transit_basis * vbr_object.model_weights_means[ix] )[intransit_ixs]
sig_x = np.sqrt( ( ( transit_basis * vbr_object.model_weights_stdvs[ix] )[intransit_ixs] )**2. )
# Now we need to make sure these distributions can be translated back to the native units
# of the target data:
# If we were fitting to the data in its native units, then we're done:
if vbr_object.target_log_units==False:
if mu_x.min()<0:
inferred_depth_mean = 0 - mu_x[ np.argmin( mu_x ) ]
else:
inferred_depth_mean = 0 - mu_x[ np.argmax( mu_x ) ]
inferred_depth_median = inferred_depth_mean
inferred_depth_mode = inferred_depth_mean
uncertainty = sig_x.max()
# Otherwise, if we're working with a multiplicative model (i.e. in log flux), then we
# have a little more work, because the posterior is a log-normal distribution:
elif vbr_object.target_log_units==True:
mean_tr = np.zeros( n_in )
median_tr = np.zeros( n_in )
mode_tr = np.zeros( n_in )
sig_tr = np.zeros( n_in )
for i in range(n_in):
var_x = sig_x[i]**2.
var_tr = ( np.exp( var_x ) - 1.0 ) * np.exp( 2*mu_x[i] + var_x )
sig_tr[i] = np.sqrt( var_tr )
mean_tr[i] = np.exp( mu_x[i] + var_x/2. )
median_tr[i] = np.exp( mu_x[i] )
mode_tr[i] = np.exp( mu_x[i] - var_x )
if mean_tr.min()<0:
inferred_depth_mean = 1. - mean_tr[ np.argmin( mean_tr ) ]
inferred_depth_median = 1. - median_tr[ np.argmin( mean_tr ) ]
inferred_depth_mode = 1. - mode_tr[ np.argmin( mean_tr ) ]
else:
inferred_depth_mean = 1. - mean_tr[ np.argmax( mean_tr ) ]
inferred_depth_median = 1. - median_tr[ np.argmax( mean_tr ) ]
inferred_depth_mode = 1. - mode_tr[ np.argmax( mean_tr ) ]
uncertainty = sig_tr.max()
# NOTE: In general, the mean, median and mode of a log-normal distribution do
# not coincide. We're counting on the fact that they roughly do here for the
# mean and standard deviation of the transit depth to be meaningful.
else:
pdb.set_trace() #this shouldn't happen
# The stuff below here is to format the printed output nicely; hopefully it
# does what it's supposed to, but if the printed results look strange, may
# need to check here that it's not simply a bug in the output formatting:
if ( ( inferred_depth_mean>(1e-4) ) + ( print_precision>1e-4) ):
units = 'percent'
factor = 1e2
else:
units = 'ppm'
factor = 1e6
dec_places = str( int( -np.log10( factor*print_precision ) ) ) ### here is where you need to think about it
format_string = '%.'+dec_places+'f'
mean = format_string % ( inferred_depth_mean * factor )
median = format_string % ( inferred_depth_median * factor )
mode = format_string % ( inferred_depth_mode * factor )
uncert = format_string % ( uncertainty * factor )
print '\n Inferred %s transit depth:' % ( transit_type )
print ' %s +/- %s %s' % ( mean, uncert, units )
print ' (median = %s, mode = %s %s)' % ( median, mode, units )
if transit_type=='primary':
vbr_object.transit_depth_inferred_mean = inferred_depth_mean
vbr_object.transit_depth_inferred_median = inferred_depth_median
vbr_object.transit_depth_inferred_mode = inferred_depth_mode
vbr_object.transit_depth_inferred_stdv = uncertainty
elif transit_type=='secondary':
vbr_object.eclipse_depth_inferred_mean = inferred_depth_mean
vbr_object.eclipse_depth_inferred_median = inferred_depth_median
vbr_object.eclipse_depth_inferred_mode = inferred_depth_mode
vbr_object.eclipse_depth_inferred_stdv = uncertainty
else:
if transit_type=='primary':
vbr_object.transit_depth_inferred_mean = None
vbr_object.transit_depth_inferred_median = None
vbr_object.transit_depth_inferred_mode = None
vbr_object.transit_depth_inferred_stdv = None
elif transit_type=='secondary':
vbr_object.eclipse_depth_inferred_mean = None
vbr_object.eclipse_depth_inferred_median = None
vbr_object.eclipse_depth_inferred_mode = None
vbr_object.eclipse_depth_inferred_stdv = None
return None
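# Example usage (illustrative only; assumes a prepared VBR object `vbr` with a
# transit basis function already appended and the model already fit):
#   calc_transit_depth( vbr, transit_type='primary' )
#   print vbr.transit_depth_inferred_mean, vbr.transit_depth_inferred_stdv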
def append_phasecurve( vbr_object, time, params, model_type='additive' ):
"""
!!!Needs testing!!!
Appends a planetary phase curve to the existing normalised training basis
matrix, and records the appended array, so that it can be automatically
appended to the normalised predictive basis matrix when that's constructed
later.
Requires the EccLightCurveModels.py and orbit.py modules.
!!!Needs testing!!!
"""
phasecurve_shape = simple_phasecurve( time, params, model_type=model_type )
phasecurve_basis = phasecurve_shape/np.std(phasecurve_shape)
vbr_object.append_training_basis_matrix(phasecurve_basis)
return None
def simple_phasecurve( time, system_params, model_type='additive', phase_amplitude=1e-4 ):
"""
!!!Needs testing!!!
A simple phase function that uses a simple sinusoidal variation only. It's
of the same form as that used by Winn et al 2011 when modelling the observed
phase curve of 55Cnc-e (see their Equation 1).
!!!Needs testing!!!
"""
# Unpack the system parameters, but be careful to account for the possibility that
# we could be using either a quadratic or 4-parameter nonlinear limb darkening law,
# as that will affect the number of system parameters that have been passed in:
if len(system_params)==9:
T0, P, aRs, RpRs, b, c1_q, c2_q, ecc, omega = system_params
ld_type = 'quad'
elif len(system_params)==11:
T0, P, aRs, RpRs, b, c1_nl, c2_nl, c3_nl, c4_nl, ecc, omega = system_params
ld_type = 'nonlin'
else:
pdb.set_trace() #this shouldn't happen
# Append a nonzero secondary eclipse depth to the system_params array:
    system_params = np.concatenate( [ np.array( system_params ), [ 0.01 ] ] )
# Calculate the orbital phase since mid-transit:
orbphase = orbit.time_to_phase( time, P, T0, omega=omega, ecc=ecc )
    # Calculate the phase curve function according to Eq 1 of Winn et al 2011
    # or Eqs 4&7 of Mislis et al 2011. The inclination is recovered from the
    # impact parameter assuming a circular orbit, and the amplitude is set by
    # the phase_amplitude keyword argument (its exact value is unimportant,
    # because the basis function is normalised by its standard deviation in
    # append_phasecurve):
    incl = np.arccos( b/aRs )
    cosz = -np.sin( incl ) * np.cos( orbphase )
    phase_curve = ( phase_amplitude/2.0 ) * ( 1+cosz )
    # Work out which data points fall within the secondary eclipse:
    f, ycoords = EccLightCurveModels.EccLightCurve_aRs( system_params, time, ld=ld_type, return_y=True )
ixs_inec = ( ( f<1. )*( ycoords<0.0 ) )
ixs_outec = ( f==1. ) + ( ( f<1. )*( ycoords>=0.0 ) )
# Impose a flat 'bridge' across the times of eclipse, which makes it possible to
# simultaneously fit a flat-bottomed eclipse function. Note that we do not need
# to similarly enforce a flat-bottomed phase curve during times of primary transit!
if model_type=='additive':
phase_curve[ixs_inec] = phase_curve[ixs_outec].max()
else:
pdb.set_trace()
return phase_curve
def lambert_phasecurve( time, params ):
"""
UNDER CONSTRUCTION!!! UNTESTED!!!
Supposed to return the phase curve for a Lambertian sphere.
Uses the form given in Equation 6 of Mislis et al (2011).
But I want to make sure this is all consistent with the equations
for the same thing given by Seager in her Exoplanet Atmospheres book???
UNDER CONSTRUCTION!!! UNTESTED!!!
"""
# Unpack the parameters:
P, T0, omega, ecc, incl = params
# Calculate the angle theta, as it is defined in Figure 2
# of Mislis et al 2011:
theta = orbit.time_to_phase( time, P, T0, omega=omega, ecc=ecc ) # Fig 2
z = np.arccos(-np.sin(incl)*np.cos(theta)) # Eq 4
phase_curve = phase_amplitude*(np.sin(z)+(np.pi-z)*np.cos(z)) # Eq 6
# Work out the in-transit and in-eclipse
f, ycoords = EccLightCurveModels.EccLightCurve_aRs( system_pars, time, ld=ld_type, return_y=True )
ixs_inec = ( ( f<1. )*( ycoords<0.0 ) )
ixs_outec = ( f==1. ) + ( ( f<1. )*( ycoords>=0.0 ) )
# Impose a flat 'bridge' across the times of eclipse, which makes it possible to
# simultaneously fit a flat-bottomed eclipse function. Note that we do not need
# to similarly enforce a flat-bottomed phase curve during times of primary transit!
if model_type=='additive':
phase_curve[ixs_inec] = phase_curve[ixs_outec].max()
else:
pdb.set_trace()
return phase_curve
def append_ellipsoid_distortion( vbr_object, time, params ):
"""
!!!Needs testing!!!
Appends a sinusoidal function approximating stellar ellipsoidal distortion to
the existing normalised training basis matrix, and records the appended array,
so that it can be automatically appended to the normalised predictive basis
matrix when that's constructed later.
** params = [ P, T0, ecc, omega ]
Requires the orbit.py module.
!!!Needs testing!!!
"""
    # The amplitude passed here is arbitrary, because the shape is normalised
    # by its standard deviation immediately below:
    ellipsoid_distortion_shape = stellar_ellipsoid_distortion( time, params, 1.0 )
ellipsoid_distortion_basis = ellipsoid_distortion_shape / np.std( ellipsoid_distortion_shape )
vbr_object.append_training_basis_matrix( ellipsoid_distortion_basis )
return None
def stellar_ellipsoid_distortion( time, params, ellipsoid_amplitude, incl=np.pi/2. ):
"""
!!!Needs testing!!!
Takes the system parameters and calculates a sinusoidal variation in the
flux in phase with the planetary orbital period, such that the maximum
brightness occurs when the planet is passing through the ascending and
descending nodes, and the minimum brightness occurs when the planet at
inferior and superior conjunction.
Requires the orbit.py module.
!!!Needs testing!!!
"""
P, T0, ecc, omega = params
orbphase = orbit.time_to_phase( time, P, T0, omega=omega, ecc=ecc )
    # incl defaults to an edge-on orbit, which is a reasonable approximation
    # for a transiting system:
    cos2z = -np.sin( incl ) * np.cos( 2*orbphase )
ellipsoid_distortion = ( ellipsoid_amplitude / 2.0 ) * ( 1+cos2z )
return ellipsoid_distortion
| gpl-2.0 |
phaller0513/aima-python | submissions/aartiste/myNN.py | 3 | 5247 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The multi-layer perceptron used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
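'''
For reference, an equivalent min-max scaling could be done with scikit-learn's
MinMaxScaler (a sketch only; the hand-rolled version above is what is actually
used):
    from sklearn.preprocessing import MinMaxScaler
    scaled = MinMaxScaler().fit_transform(trumpECHP.data).tolist()
'''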
Examples = {
'TrumpDefault': {
'frame': trumpECHP,
},
'TrumpSGD': {
'frame': trumpECHP,
'mlpc': mlpc
},
'TrumpScaled': {
'frame': trumpScaled,
},
} | mit |
DistrictDataLabs/03-censusables | censusables/fivey.py | 1 | 3631 | """ACS5-based analysis
"""
import argparse
import json
import matplotlib.pyplot as plt
import pandas as pd
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("join", help="Business/county join file")
parser.add_argument("reviews", help="Yelp review file")
parser.add_argument("census", help="ACS 5-year data")
parser.add_argument("name", help="Output name prefix")
args = parser.parse_args()
oname = args.name
# Load reviews
reviews = pd.DataFrame(json.loads(l) for l in open(args.reviews))
# Only use 2009 to 2013 reviews
reviews = reviews[reviews.date.apply(lambda d: d >= '2009' and d < '2014')]
# Reduce reviews to business review counts
reviews = (reviews[['stars']]
.groupby(reviews.business_id)
.count()
.reset_index()
)
# Fix column names
reviews.columns = 'business_id reviews'.split()
# Load the geo join data and join with the reviews
join = pd.DataFrame(json.loads(l) for l in open(args.join))
reviews = reviews.merge(join)
# Get review counts by GISJOIN
reviews = (reviews[['reviews']]
.groupby(reviews.GISJOIN)
.sum()
.reset_index()
)
# Load the 5-year census data
census = pd.read_csv(args.census)
# We want the columns that start with UEE. There should be 49.
uee = [c for c in census.columns if c.startswith('UEE')]
assert len(uee) == 49
census = census[['GISJOIN'] + uee]
# Assign more useful column names:
census.columns = '''
GISJOIN TOTAL
M M_4 M5_9 M10_14 M15_17 M18_19 M20 M21 M22_24 M25_29 M30_34
M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69
M70_74 M75_79 M80_84 M85_
F F_4 F5_9 F10_14 F15_17 F18_19 F20 F21 F22_24 F25_29 F30_34
F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69
F70_74 F75_79 F80_84 F85_
'''.strip().split()
# Compute young and old columns:
age_groups = {}
for n in '''
    M18_19 M20 M21 M22_24 M25_29 M30_34
    F18_19 F20 F21 F22_24 F25_29 F30_34
'''.strip().split():
age_groups[n] = 'young'
for n in '''
M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69
M70_74 M75_79 M80_84 M85_
F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69
F70_74 F75_79 F80_84 F85_
'''.strip().split():
age_groups[n] = 'old'
yo = census.groupby(age_groups, axis=1).sum()
census = pd.concat((census, yo), axis=1)
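# (groupby with a dict on axis=1 maps each column name to a group label and
# sums within each label, e.g. {'M20': 'young', 'F85_': 'old'}; columns that
# do not appear in the dict are left out of the grouped result.)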
# Join with reviews
census = census.merge(reviews)
# Normalize by total population
norm = census[census.columns[3:]].div(census.TOTAL, axis=0)
census = pd.concat((census[census.columns[:3]], norm), axis=1)
# Whew, now we're ready to explore relationships. Plot response
# rate vs age-group fraction for young and old.
fig, ax = plt.subplots(2, 1)
ax[0].set_yscale('log')
ax[1].set_yscale('log')
ax[0].scatter(census.young, census.reviews, c='r', label='young')
ax[1].scatter(census.old, census.reviews, c='b', label='old')
ax[0].set_title("ACS5 %s Yelp review rate by fraction young" % oname)
ax[1].set_title("ACS5 %s Yelp review rate by fraction old" % oname)
plt.savefig(oname+'_acs5_reviews_fraction_young_and_old.svg')
# I wonder what it would look like wo Vegas
census = census[census.GISJOIN.apply(lambda g: g[:3] != 'G32')]
fig, ax = plt.subplots(2, 1)
ax[0].set_yscale('log')
ax[1].set_yscale('log')
ax[0].scatter(census.young, census.reviews, c='r', label='young')
ax[1].scatter(census.old, census.reviews, c='b', label='old')
ax[0].set_title("ACS5 %s Yelp review rate by fraction young no Vegas" % oname)
ax[1].set_title("ACS5 %s Yelp review rate by fraction old no Vegas" % oname)
plt.savefig(oname+'_acs5_reviews_fraction_young_and_old_no_vegas.png')
| apache-2.0 |
metaml/NAB | tests/integration/scorer_test.py | 8 | 7316 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import datetime
import pandas
import random
import unittest
from nab.scorer import Scorer
from nab.test_helpers import generateTimestamps, generateWindows, generateLabels
class ScorerTest(unittest.TestCase):
def _checkCounts(self, counts, tn, tp, fp, fn):
"""Ensure the metric counts are correct."""
self.assertEqual(counts['tn'], tn, "Incorrect tn count")
self.assertEqual(counts['tp'], tp, "Incorrect tp count")
self.assertEqual(counts['fp'], fp, "Incorrect fp count")
self.assertEqual(counts['fn'], fn, "Incorrect fn count")
def setUp(self):
# Standard application profile
self.costMatrix = {"tpWeight": 1.0,
"fnWeight": 1.0,
"fpWeight": 1.0,
"tnWeight": 1.0}
def testNullCase(self):
"""No windows and no predictions should yield a score of 0.0."""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
timestamps = generateTimestamps(start, increment, length)
predictions = pandas.Series([0]*length)
labels = pandas.Series([0]*length)
windows = []
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertEqual(score, 0.0)
self._checkCounts(scorer.counts, 10, 0, 0, 0)
def testFalsePositiveScaling(self):
"""
    Test that scaling the weight of false positives results in an approximate
    balance with the true positives.
    The contributions of TP and FP scores should approximately cancel, i.e.
    total score ~= 0. With x windows, this total score should on average
    decrease by x/2 because of x FNs. Thus, the acceptable range for score
    should be centered about -x/2.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
# Scale for 10% = windowSize/length
self.costMatrix["fpWeight"] = 0.11
# Make arbitrary detections, score, repeat
scores = []
for _ in xrange(20):
predictions = pandas.Series([0]*length)
indices = random.sample(range(length), 10)
predictions[indices] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
scores.append(score)
avgScore = sum(scores)/float(len(scores))
self.assertTrue(-1.5 <= avgScore <= 0.5, "The average score across 20 sets "
"of random detections is %f, which is not within the acceptable range "
"-1.5 to 0.5." % avgScore)
def testRewardLowFalseNegatives(self):
"""
Given false negatives in the set of detections, the score output with the
Reward Low False Negatives application profile will be greater than with
the Standard application profile.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
costMatrixFN = copy.deepcopy(self.costMatrix)
costMatrixFN["fnWeight"] = 2.0
costMatrixFN["fpWeight"] = 0.055
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
scorer2 = Scorer(timestamps, predictions, labels, windows, costMatrixFN,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, 0.5*score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
self._checkCounts(scorer2.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
def testRewardLowFalsePositives(self):
"""
Given false positives in the set of detections, the score output with the
Reward Low False Positives application profile will be greater than with
the Standard application profile.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 0
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = []
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
costMatrixFP = copy.deepcopy(self.costMatrix)
costMatrixFP["fpWeight"] = 2.0
costMatrixFP["fnWeight"] = 0.5
# FP
predictions[0] = 1
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
scorer2 = Scorer(timestamps, predictions, labels, windows, costMatrixFP,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, 0.5*score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows-1, 0, 1, 0)
self._checkCounts(scorer2.counts, length-windowSize*numWindows-1, 0, 1, 0)
def testScoringAllMetrics(self):
"""
This tests an example set of detections, where all metrics have counts > 0.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 2
windowSize = 5
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
index = timestamps[timestamps == windows[0][0]].index[0]
# TP, add'l TP, and FP
predictions[index] = 1
predictions[index+1] = 1
predictions[index+7] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertAlmostEquals(score, -0.9540, 4)
self._checkCounts(scorer.counts, length-windowSize*numWindows-1, 2, 1, 8)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
walterreade/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
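# An extra illustrative check (a sketch, not part of the upstream suite): on
# noiseless data generated from known coefficients, ordinary least squares
# should recover those coefficients almost exactly.
def test_linear_regression_recovers_known_coefficients():
    local_rng = np.random.RandomState(42)
    X = local_rng.rand(50, 3)
    true_coef = np.array([1.5, -2.0, 3.0])
    true_intercept = 0.5
    y = X.dot(true_coef) + true_intercept
    reg = LinearRegression(fit_intercept=True)
    reg.fit(X, y)
    assert_array_almost_equal(reg.coef_, true_coef)
    assert_almost_equal(reg.intercept_, true_intercept)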
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
ericdill/bluesky | bluesky/scientific_callbacks.py | 2 | 4876 | import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
from scipy.ndimage import center_of_mass
from bluesky.callbacks import CollectThenCompute
class PeakStats(CollectThenCompute):
def __init__(self, x, y, edge_count=None):
"""
        Compute peak statistics after a run finishes.
Results are stored in the attributes.
Parameters
----------
x : string
field name for the x variable (e.g., a motor)
y : string
field name for the y variable (e.g., a detector)
edge_count : int or None, optional
If not None, number of points at beginning and end to use
for quick and dirty background subtraction.
Note
----
It is assumed that the two fields, x and y, are recorded in the same
Event stream.
Attributes
----------
com : center of mass
cen : TBD
max : x location of y maximum
min : x location of y minimum
"""
self.x = x
self.y = y
self.com = None
self.cen = None
self.max = None
self.min = None
self.nlls = None
self.fwhm = None
self.lin_bkg = None
self._edge_count = edge_count
super().__init__()
def __getitem__(self, key):
if key in ['com', 'cen', 'max', 'min']:
return getattr(self, key)
else:
raise KeyError
def compute(self):
"This method is called at run-stop time by the superclass."
# clear all results
self.com = None
self.cen = None
self.max = None
self.min = None
self.nlls = None
self.fwhm = None
self.lin_bkg = None
x = []
y = []
for event in self._events:
try:
_x = event['data'][self.x]
_y = event['data'][self.y]
except KeyError:
pass
else:
x.append(_x)
y.append(_y)
x = np.array(x)
y = np.array(y)
self.x_data = x
self.y_data = y
if self._edge_count is not None:
left_x = np.mean(x[:self._edge_count])
left_y = np.mean(y[:self._edge_count])
right_x = np.mean(x[-self._edge_count:])
right_y = np.mean(y[-self._edge_count:])
m = (right_y - left_y) / (right_x - left_x)
b = left_y - m * left_x
# don't do this in place to not mess with self.y_data
y = y - (m * x + b)
self.lin_bkg = {'m': m, 'b': b}
# Compute x value at min and max of y
self.max = x[np.argmax(y)], self.y_data[np.argmax(y)],
self.min = x[np.argmin(y)], self.y_data[np.argmin(y)],
self.com = np.interp(center_of_mass(y), np.arange(len(x)), x)
mid = (np.max(y) + np.min(y)) / 2
crossings = np.where(np.diff(y > mid))[0]
_cen_list = []
for cr in crossings.ravel():
_x = x[cr:cr+2]
_y = y[cr:cr+2] - mid
dx = np.diff(_x)
dy = np.diff(_y)
m = dy / dx
_cen_list.append((-_y[0] / m) + _x[0])
if _cen_list:
self.cen = np.mean(_cen_list)
if len(_cen_list) == 2:
self.fwhm = float(_cen_list[1] - _cen_list[0])
# reset y data
y = self.y_data
# insert lmfit
def plot_peak_stats(peak_stats, ax=None):
"""
Plot data and various peak statistics.
Parameters
----------
peak_stats : PeakStats
ax : matplotlib.Axes, optional
Returns
-------
arts : dict
dictionary of matplotlib Artist objects, for further styling
"""
arts = {}
ps = peak_stats # for brevity
if ax is None:
fig, ax = plt.subplots()
ax.margins(.1)
# Plot points, vertical lines, and a legend. Collect Artist objs to return.
points, = ax.plot(ps.x_data, ps.y_data, 'o')
vlines = []
styles = iter(cycler('color', 'krgbm'))
for style, attr in zip(styles, ['cen', 'com']):
print(style, attr)
val = getattr(ps, attr)
if val is None:
continue
vlines.append(ax.axvline(val, label=attr, **style))
for style, attr in zip(styles, ['max', 'min']):
print(style, attr)
val = getattr(ps, attr)
if val is None:
continue
vlines.append(ax.axvline(val[0], label=attr, lw=3, **style))
vlines.append(ax.axhline(val[1], lw=3, **style))
if ps.lin_bkg:
lb = ps.lin_bkg
ln, = ax.plot(ps.x_data, ps.x_data*lb['m'] + lb['b'],
ls='--', lw=2, color='k')
arts['bkg'] = ln
legend = ax.legend(loc='best')
arts.update({'points': points, 'vlines': vlines, 'legend': legend})
return arts
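# Illustrative usage sketch (assumes a bluesky session in which `RE`, a scan
# plan, and 'motor'/'det' field names are already defined; names here are
# placeholders):
#   ps = PeakStats('motor', 'det')
#   RE(my_scan_plan, ps)   # subscribe the callback for the duration of one run
#   plot_peak_stats(ps)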
| bsd-3-clause |
lukas/scikit-class | examples/keras-encoding/cnn.py | 2 | 3409 | import pandas as pd
import numpy as np
from keras.utils import to_categorical
import wandb
from wandb.wandb_keras import WandbKerasCallback
run = wandb.init()
config = run.config
df = pd.read_csv('../scikit/tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
# mapping from labels to numbers
mapping = {'Negative emotion':0,
'No emotion toward brand or product':1,
'Positive emotion':2,
'I can\'t tell':3}
numeric_target = [mapping[t] for t in fixed_target]
num_labels = len(mapping)
# one hot encode outputs
labels = to_categorical(numeric_target)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(num_words=config.max_words)
tokenizer.fit_on_texts(fixed_text)
sequences = tokenizer.texts_to_sequences(fixed_text)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=config.max_sequence_length)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(config.validation_split * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
# Load the word embedding
embeddings_index = {}
f = open('../scikit/glove/glove.6B.100d.txt')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
embedding_dim = 100
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
from keras.layers import Embedding, Input, Dense, Flatten, Conv1D
from keras.layers import MaxPooling1D, Dropout
from keras.models import Model
embedding_layer = Embedding(len(word_index) + 1,
embedding_dim,
weights=[embedding_matrix],
input_length=config.max_sequence_length,
trainable=False)
sequence_input = Input(shape=(config.max_sequence_length,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Dropout(0.3)(embedded_sequences)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Dropout(0.3)(x)
#x = Conv1D(128, 5, activation='relu')(x)
#x = MaxPooling1D(5)(x) # global max pooling
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.3)(x)
preds = Dense(num_labels, activation='softmax')(x)
model = Model(sequence_input, preds)
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
model.fit(x_train, y_train, validation_data=(x_val, y_val),
epochs=config.epochs, batch_size=config.batch_size,
callbacks=[WandbKerasCallback()])
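# Possible follow-up (a sketch, not required for the run): persist the trained
# model for later reuse, e.g.
# model.save('cnn_glove_emotion.h5')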
| gpl-2.0 |
maxalbert/blaze | blaze/compute/tests/test_sparksql.py | 3 | 14017 | from __future__ import absolute_import, print_function, division
import pytest
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
sa = pytest.importorskip('sqlalchemy')
import os
import itertools
import shutil
from py4j.protocol import Py4JJavaError
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from blaze import compute, symbol, into, by, sin, exp, cos, tan, join
try:
from pyspark.sql import DataFrame as SparkDataFrame
except ImportError:
from pyspark.sql import SchemaRDD as SparkDataFrame
from pyspark import HiveContext, SQLContext
from pyspark.sql import Row, SchemaRDD
from odo import odo, discover
from odo.utils import tmpfile
data = [['Alice', 100.0, 1],
['Bob', 200.0, 2],
['Alice', 50.0, 3]]
date_data = []
np.random.seed(0)
for attr in ('YearBegin', 'MonthBegin', 'Day', 'Hour', 'Minute', 'Second'):
rng = pd.date_range(start='now', periods=len(data),
freq=getattr(pd.datetools, attr)()).values
date_data += list(zip(np.random.choice(['Alice', 'Bob', 'Joe', 'Lester'],
size=len(data)),
np.random.rand(len(data)) * 100,
np.random.randint(100, size=3),
rng))
cities_data = [['Alice', 'NYC'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
date_df = pd.DataFrame(date_data, columns=['name', 'amount', 'id', 'ds'])
cities_df = pd.DataFrame(cities_data, columns=['name', 'city'])
# sc is from conftest.py
@pytest.yield_fixture(scope='module')
def sql(sc):
try:
if hasattr(pyspark.sql, 'types'): # pyspark >= 1.3
yield HiveContext(sc)
else:
yield SQLContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath)
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath)
os.remove(logpath)
@pytest.yield_fixture(scope='module')
def people(sc):
with tmpfile('.txt') as fn:
df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2])))
@pytest.yield_fixture(scope='module')
def cities(sc):
with tmpfile('.txt') as fn:
cities_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0], city=person[1]))
@pytest.yield_fixture(scope='module')
def date_people(sc):
with tmpfile('.txt') as fn:
date_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2]),
ds=pd.Timestamp(person[3]).to_pydatetime()))
@pytest.fixture(scope='module')
def ctx(sql, people, cities, date_people):
try:
sql.registerDataFrameAsTable(sql.createDataFrame(people), 't')
sql.cacheTable('t')
sql.registerDataFrameAsTable(sql.createDataFrame(cities), 's')
sql.cacheTable('s')
sql.registerDataFrameAsTable(sql.createDataFrame(date_people), 'dates')
sql.cacheTable('dates')
except AttributeError:
sql.inferSchema(people).registerTempTable('t')
sql.inferSchema(cities).registerTempTable('s')
sql.inferSchema(date_people).registerTempTable('dates')
return sql
@pytest.fixture(scope='module')
def db(ctx):
return symbol('db', discover(ctx))
def test_projection(db, ctx):
expr = db.t[['id', 'name']]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_symbol_compute(db, ctx):
assert isinstance(compute(db.t, ctx), (SparkDataFrame, SchemaRDD))
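def test_symbol_dshape(db, ctx):
    # Extra illustrative check (a sketch, not in the original suite): the
    # datashape discovered from the computed Spark table should match the
    # datashape of the corresponding blaze symbol.
    assert discover(compute(db.t, ctx)) == db.t.dshape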
def test_field_access(db, ctx):
for field in db.t.fields:
expr = getattr(db.t, field)
result = into(pd.Series, compute(expr, ctx))
expected = compute(expr, {db: {'t': df}})
assert result.name == expected.name
np.testing.assert_array_equal(result.values,
expected.values)
def test_head(db, ctx):
expr = db.t[['name', 'amount']].head(2)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result) == into(list, expected)
def test_literals(db, ctx):
expr = db.t[db.t.amount >= 100]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_by_summary(db, ctx):
t = db.t
expr = by(t.name, mymin=t.amount.min(), mymax=t.amount.max())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_join(db, ctx):
expr = join(db.t, db.s)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert isinstance(result, (SparkDataFrame, SchemaRDD))
assert into(set, result) == into(set, expected)
assert discover(result) == expr.dshape
def test_join_diff_contexts(db, ctx, cities):
expr = join(db.t, db.s, 'name')
people = ctx.table('t')
cities = into(ctx, cities, dshape=discover(ctx.table('s')))
scope = {db: {'t': people, 's': cities}}
result = compute(expr, scope)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert (set(map(frozenset, odo(result, set))) ==
set(map(frozenset, odo(expected, set))))
def test_field_distinct(ctx, db):
expr = db.t.name.distinct()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_boolean(ctx, db):
expr = db.t.amount > 50
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_selection(ctx, db):
expr = db.t[db.t.amount > 50]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_selection_field(ctx, db):
expr = db.t[db.t.amount > 50].name
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(['field', 'reduction'],
itertools.product(['id', 'amount'], ['sum', 'max',
'min', 'mean',
'count',
'nunique']))
def test_reductions(ctx, db, field, reduction):
expr = getattr(db.t[field], reduction)()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result)[0][0] == expected
def test_column_arithmetic(ctx, db):
expr = db.t.amount + 1
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
# pyspark doesn't use __version__ so we use this kludge
# should submit a bug report upstream to get __version__
def fail_on_spark_one_two(x):
if hasattr(pyspark.sql, 'types'):
return x
else:
return pytest.mark.xfail(x, raises=py4j.protocol.Py4JJavaError,
reason=('math functions only supported in '
'HiveContext'))
@pytest.mark.parametrize('func', list(map(fail_on_spark_one_two,
[sin, cos, tan, exp])))
def test_math(ctx, db, func):
expr = func(db.t.amount)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
np.testing.assert_allclose(np.sort(odo(result, np.ndarray,
dshape=expr.dshape)),
np.sort(odo(expected, np.ndarray)))
@pytest.mark.parametrize(['field', 'ascending'],
itertools.product(['name', 'id', ['name', 'amount']],
[True, False]))
def test_sort(ctx, db, field, ascending):
expr = db.t.sort(field, ascending=ascending)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
@pytest.mark.xfail
def test_map(ctx, db):
expr = db.t.id.map(lambda x: x + 1, 'int')
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(['grouper', 'reducer', 'reduction'],
itertools.chain(itertools.product(['name', 'id',
['id', 'amount']],
['id', 'amount'],
['sum', 'count',
'max', 'min',
'mean',
'nunique']),
[('name', 'name', 'count'),
('name', 'name', 'nunique')]))
def test_by(ctx, db, grouper, reducer, reduction):
t = db.t
expr = by(t[grouper], total=getattr(t[reducer], reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert (set(map(frozenset, into(list, result))) ==
set(map(frozenset, into(list, expected))))
@pytest.mark.parametrize(['reducer', 'reduction'],
itertools.product(['id', 'name'],
['count', 'nunique']))
def test_multikey_by(ctx, db, reducer, reduction):
t = db.t
expr = by(t[['id', 'amount']], total=getattr(getattr(t, reducer),
reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert (set(map(frozenset, into(list, result))) ==
set(map(frozenset, into(list, expected))))
def test_grouper_with_arith(ctx, db):
expr = by(db.t[['id', 'amount']], total=(db.t.amount + 1).sum())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_by_non_native_ops(ctx, db):
expr = by(db.t.id, total=db.t.id.nunique())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
@pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
reason=('length string function not available without '
'HiveContext'),
raises=py4j.protocol.Py4JJavaError)
def test_strlen(ctx, db):
expr = db.t.name.strlen()
result = odo(compute(expr, ctx), pd.Series)
expected = compute(expr, {db: {'t': df}})
assert result.name == 'name'
assert expected.name == 'name'
assert odo(result, set) == odo(expected, set)
date_attrs = [pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
attr,
raises=(Py4JJavaError, AssertionError),
reason=('date attribute %r not supported '
'without hive') % attr)
for attr in ['year', 'month', 'day', 'hour', 'minute', 'second']]
date_attrs += [pytest.mark.xfail(attr,
raises=Py4JJavaError,
reason=('Hive does not support date '
'attribute %r') % attr)
for attr in ['millisecond', 'microsecond']]
@pytest.mark.parametrize('attr', date_attrs)
def test_by_with_date(ctx, db, attr):
# TODO: investigate CSV writing precision between pandas 0.16.0 and 0.16.1
# TODO: see if we can use odo to convert the dshape of an existing
# DataFrame
expr = by(getattr(db.dates.ds, attr),
mean=db.dates.amount.mean())
result = odo(compute(expr, ctx), pd.DataFrame).sort('mean').reset_index(drop=True)
expected = compute(expr, {db: {'dates': date_df}}).sort('mean').reset_index(drop=True)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('keys', [[1], [1, 2]])
def test_isin(ctx, db, keys):
expr = db.t[db.t.id.isin(keys)]
result = odo(compute(expr, ctx), set)
expected = odo(compute(expr, {db: {'t': df}}), set)
assert (set(map(frozenset, odo(result, list))) ==
set(map(frozenset, odo(expected, list))))
def test_nunique_spark_dataframe(ctx, db):
assert (odo(compute(db.t.nunique(), ctx), int) ==
ctx.table('t').distinct().count())
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
kernc/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
buguen/pylayers | pylayers/location/geometric/util/cdf2.py | 1 | 3372 | # -*- coding:Utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import os
import pdb
try:
import mplrc.ieee.transaction
except:
pass
from matplotlib import rcParams
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
class CDF(object):
def __init__(self, ld, filename):
"""
cdf = CDF(ld)
        ld is a list of dictionaries
        d0 = ld[0]
        d0['bound'] : abscissa bounds (bin edges) for cdf 0
        d0['values'] : values
        d0['xlabel'] : x-axis label
        d0['ylabel'] : y-axis label
        d0['legend'] : legend
        d0['title'] : title
        d0['filename'] : filename
        d0['linewidth'] : linewidth
"""
self.ld = ld
self.parmsh = {}
self.parmsh['file'] = True
self.filename = filename
self.cdf = []
for d in self.ld:
try:
self.save=d['save']
except:
self.save=True
bound = d['bound']
values = d['values']
Nv = len(values)
cdf = np.array([])
for k in bound:
u = np.nonzero(values <= k)
lu = len(u[0]) / (Nv * 1.0)
cdf = np.hstack((cdf, lu))
self.cdf.append(cdf)
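        # (Comment added for clarity, not in the original file:) self.cdf[i]
        # is the empirical CDF of ld[i]['values'] evaluated at ld[i]['bound'],
        # i.e. the fraction of values that are <= each bound.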
def show(self):
"""
show()
"""
f = plt.figure()
leg = []
c = []
ax = f.add_subplot(111)
for k in range(len(self.ld)):
d = self.ld[k]
bound = d['bound']
marker = d['marker']
markersize = d['markersize']
markercolor = d['markercolor']
markerfrequency = d['markerfrequency']
linewidth = d['linewidth']
line = d['line']
color = d['color']
legend = d['legend']
cdf = self.cdf[k]
c.append(
ax.plot(bound, cdf, marker=marker, markevery=markerfrequency,
ms=markersize, mfc=markercolor, ls=line, c=color, linewidth=linewidth, label=legend))
plt.xlabel(self.ld[0]['xlabel'])
plt.ylabel(self.ld[0]['ylabel'])
ax.legend(loc='best', scatterpoints=1, numpoints=1.)
plt.grid()
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
        if self.save:
            if not os.path.isdir('./cdf/' + self.filename):
                os.makedirs('./cdf/' + self.filename)
plt.savefig('./cdf/' + self.filename + '/' + self.filename + '.pdf',
format='pdf', bbox_inches='tight', pad_inches=0)
plt.savefig('./cdf/' + self.filename + '/' + self.filename + '.eps',
format='eps', bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
d0 = {}
d0['values'] = sp.randn(1000)
d0['bound'] = np.arange(-10, 10, 0.1)
d0['xlabel'] = 'xlabel'
d0['ylabel'] = 'ylabel'
d0['legend'] = 'legend '
d0['title'] = 'title'
d0['marker'] = 'r-'
d0['linewidth'] = 3
d0['filename'] = 'essai.png'
d1 = {}
d1['values'] = 4 * sp.randn(1000)
d1['bound'] = np.arange(-10, 10, 0.1)
d1['xlabel'] = 'xlabel'
d1['ylabel'] = 'ylabel'
d1['legend'] = 'legend '
d1['title'] = 'title'
d1['marker'] = 'bo'
d1['linewidth'] = 3
lv = [d0, d1]
    c = CDF(lv, 'essai')  # CDF() requires a filename argument (here taken from d0['filename'])
| lgpl-3.0 |
stylianos-kampakis/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
fspaolo/scikit-learn | sklearn/feature_selection/tests/test_base.py | 9 | 3669 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import atleast2d_or_csc
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = atleast2d_or_csc(X)
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
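# Note (added for clarity, not in the original test module): with step=2 the
# StepSelector above keeps columns 0, 2, 4, 6, 8 of the 10-column X, which is
# exactly what Xt, support and support_inds encode.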
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.todense())
assert_array_equal(Xt, Xt_actual2.todense())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.todense())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
hugobowne/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
altermarkive/Resurrecting-JimFleming-Numerai | src/ml-auto-sklearn/run.py | 1 | 1372 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import autosklearn.classification
import numpy
import os
import pandas
import sys
def ingest():
training_data = pandas.read_csv(os.getenv('TRAINING'), header=0)
tournament_data = pandas.read_csv(os.getenv('TESTING'), header=0)
features = [f for f in list(training_data) if 'feature' in f]
x = training_data[features]
y = training_data['target']
x_tournament = tournament_data[features]
ids = tournament_data['id']
return (x, y, x_tournament, ids)
def train(x, y):
model = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=int(os.getenv('TIME_LIMIT_ALL', '3600')),
per_run_time_limit=int(os.getenv('TIME_LIMIT_PART', '360')))
model.fit(x, y)
print(model.show_models())
return model
def predict(model, x_tournament, ids):
eps = sys.float_info.epsilon
y_prediction = model.predict_proba(x_tournament)
results = numpy.clip(y_prediction[:, 1], 0.0 + eps, 1.0 - eps)
results_df = pandas.DataFrame(data={'probability': results})
joined = pandas.DataFrame(ids).join(results_df)
joined.to_csv(os.getenv('PREDICTING'), index=False, float_format='%.16f')
def main():
x, y, x_tournament, ids = ingest()
model = train(x, y)
predict(model, x_tournament.copy(), ids)
if __name__ == '__main__':
main()
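# Illustrative invocation (a sketch; the file names below are placeholders and
# are not part of the original repository -- only the environment variable
# names are taken from the code above):
#   TRAINING=train.csv TESTING=tournament.csv PREDICTING=predictions.csv \
#   TIME_LIMIT_ALL=3600 TIME_LIMIT_PART=360 python3 run.py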
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/qtconsole/rich_jupyter_widget.py | 7 | 17139 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from base64 import decodestring
import os
import re
from warnings import warn
from qtconsole.qt import QtCore, QtGui
from ipython_genutils.path import ensure_dir_exists
from traitlets import Bool
from qtconsole.svg import save_svg, svg_to_clipboard, svg_to_image
from .jupyter_widget import JupyterWidget
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
latex_to_png = None
class LatexError(Exception):
"""Exception for Latex errors"""
class RichIPythonWidget(JupyterWidget):
"""Dummy class for config inheritance. Destroyed below."""
class RichJupyterWidget(RichIPythonWidget):
""" An JupyterWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichJupyterWidget protected class variables.
_payload_source_plot = 'ipykernel.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichJupyterWidget.
"""
kw['kind'] = 'rich'
super(RichJupyterWidget, self).__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
# Do we support jpg ?
# it seems that sometime jpg support is a plugin of QT, so try to assume
# it is not always supported.
_supported_format = map(str, QtGui.QImageReader.supportedImageFormats())
self._jpg_supported = 'jpeg' in _supported_format
#---------------------------------------------------------------------------
    # 'ConsoleWidget' public interface overrides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super(RichJupyterWidget, self).export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super(RichJupyterWidget, self)._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
"""Append the Out[] prompt and make the output nicer
Shared code for some the following if statement
"""
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_execute_result(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
self._append_html(self.output_sep2, True)
elif 'text/latex' in data:
self._pre_image_append(msg, prompt_number)
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_execute_result(msg)
def _handle_display_data(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("display_data: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
self.log.debug("display: %s", msg.get('content', ''))
if 'image/svg+xml' in data:
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
elif 'text/latex' in data and latex_to_png:
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichJupyterWidget' protected interface
#---------------------------------------------------------------------------
def _is_latex_math(self, latex):
"""
Determine if a Latex string is in math mode
This is the only mode supported by qtconsole
"""
basic_envs = ['math', 'displaymath']
        starable_envs = ['equation', 'eqnarray', 'multline', 'gather', 'align',
                         'flalign', 'alignat']
star_envs = [env + '*' for env in starable_envs]
envs = basic_envs + starable_envs + star_envs
env_syntax = [r'\begin{{{0}}} \end{{{0}}}'.format(env).split() for env in envs]
math_syntax = [
(r'\[', r'\]'), (r'\(', r'\)'),
('$$', '$$'), ('$', '$'),
]
for start, end in math_syntax + env_syntax:
inner = latex[len(start):-len(end)]
if start in inner or end in inner:
return False
if latex.startswith(start) and latex.endswith(end):
return True
return False
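    # Illustrative behaviour of _is_latex_math (a sketch added for clarity,
    # not part of the original widget; the inputs are made-up examples):
    #   self._is_latex_math(r'$x^2$')                          -> True
    #   self._is_latex_math(r'\begin{align} x \end{align}')    -> True
    #   self._is_latex_math('plain text')                      -> False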
def _append_latex(self, latex, before_prompt=False, metadata=None):
""" Append latex data to the widget."""
png = None
if self._is_latex_math(latex):
png = latex_to_png(latex, wrap=False, backend='dvipng')
if png is None and latex.startswith('$') and latex.endswith('$'):
# matplotlib only supports strings enclosed in dollar signs
png = latex_to_png(latex, wrap=False, backend='matplotlib')
if png:
self._append_png(png, before_prompt, metadata)
else:
raise LatexError
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
ensure_dir_exists(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtGui.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
# clobber RichIPythonWidget above:
class RichIPythonWidget(RichJupyterWidget):
"""Deprecated class. Use RichJupyterWidget"""
def __init__(self, *a, **kw):
warn("RichIPythonWidget is deprecated, use RichJupyterWidget")
super(RichIPythonWidget, self).__init__(*a, **kw)
| gpl-3.0 |
gph82/PyEMMA | pyemma/coordinates/clustering/interface.py | 1 | 13210 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 18.02.2015
@author: marscher
'''
from __future__ import absolute_import
import os
from pyemma._base.model import Model
from pyemma._ext.sklearn.base import ClusterMixin
from pyemma.coordinates.clustering import regspatial
from pyemma.coordinates.transform.transformer import StreamingTransformer
from pyemma.util.discrete_trajectories import index_states, sample_indexes_by_state
from pyemma.util.files import mkdir_p
from six.moves import range, zip
import numpy as np
class AbstractClustering(StreamingTransformer, Model, ClusterMixin):
"""
provides a common interface for cluster algorithms.
Parameters
----------
metric: str, default='euclidean'
metric to pass to c extension
n_jobs: int or None, default=None
        How many threads to use during assignment
If None, all available CPUs will be used.
"""
def __init__(self, metric='euclidean', n_jobs=None):
super(AbstractClustering, self).__init__()
self.metric = metric
self._clustercenters = None
self._previous_stride = -1
self._dtrajs = []
self._overwrite_dtrajs = False
self._index_states = []
self.n_jobs = n_jobs
@property
def n_jobs(self):
""" Returns number of jobs/threads to use during assignment of data.
Returns
-------
        The number of jobs as an int. If n_jobs was left at None, this is the number of available processors/cores or the setting of the 'OMP_NUM_THREADS' env variable.
Notes
-----
By setting the environment variable 'OMP_NUM_THREADS' to an integer,
one will override the default argument of n_jobs (currently None).
"""
assert isinstance(self._n_jobs, int)
return self._n_jobs
@n_jobs.setter
def n_jobs(self, val):
""" set number of jobs/threads to use via assignment of data.
Parameters
----------
val: int or None
            a positive int for the number of jobs, or None to use all available resources.
        Notes
        -----
        If left at the default (None), the 'OMP_NUM_THREADS' environment
        variable or, failing that, the number of available CPUs determines
        the value actually used.
        """
from pyemma.util.reflection import get_default_args
def_args = get_default_args(self.__init__)
# default value from constructor?
if val == def_args['n_jobs']:
omp_threads_from_env = os.getenv('OMP_NUM_THREADS', None)
import psutil
n_cpus = psutil.cpu_count()
if omp_threads_from_env:
try:
self._n_jobs = int(omp_threads_from_env)
self.logger.info("number of threads obtained from env variable"
" 'OMP_NUM_THREADS'=%s" % omp_threads_from_env)
except ValueError as ve:
self.logger.warning("could not parse env variable 'OMP_NUM_THREADS'."
"Value='%s'. Error=%s" % (omp_threads_from_env, ve))
self._n_jobs = n_cpus
else:
self._n_jobs = n_cpus
else:
self._n_jobs = int(val)
@property
def clustercenters(self):
""" Array containing the coordinates of the calculated cluster centers. """
return self._clustercenters
@clustercenters.setter
def clustercenters(self, val):
val = np.asarray(val, dtype='float32', order='C')
self._clustercenters = val
@property
def overwrite_dtrajs(self):
"""
Should existing dtraj files be overwritten. Set this property to True to overwrite.
"""
return self._overwrite_dtrajs
@overwrite_dtrajs.setter
def overwrite_dtrajs(self, value):
self._overwrite_dtrajs = value
@property
def dtrajs(self):
"""Discrete trajectories (assigned data to cluster centers)."""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign(stride=1)
return self._dtrajs # returning what we have saved
@property
def index_clusters(self):
"""Returns trajectory/time indexes for all the clusters
Returns
-------
indexes : list of ndarray( (N_i, 2) )
For each state, all trajectory and time indexes where this cluster occurs.
Each matrix has a number of rows equal to the number of occurrences of the corresponding state,
with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index
within the trajectory.
"""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign()
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self._dtrajs)
return self._index_states
def sample_indexes_by_cluster(self, clusters, nsample, replace=True):
"""Samples trajectory/time indexes according to the given sequence of states.
Parameters
----------
clusters : iterable of integers
It contains the cluster indexes to be sampled
nsample : int
Number of samples per cluster. If replace = False, the number of returned samples per cluster could be smaller
            if fewer than nsample indexes are available for a cluster.
replace : boolean, optional
Whether the sample is with or without replacement
Returns
-------
indexes : list of ndarray( (N, 2) )
List of the sampled indices by cluster.
Each element is an index array with a number of rows equal to N=len(sequence), with rows consisting of a
tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
"""
        # Check if the catalogue (index_states) has been built; build it if needed
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self.dtrajs)
return sample_indexes_by_state(self._index_states[clusters], nsample, replace=replace)
def _transform_array(self, X):
"""get closest index of point in :attr:`clustercenters` to x."""
dtraj = np.empty(X.shape[0], dtype=self.output_type())
regspatial.assign(X.astype(np.float32, order='C', copy=False),
self.clustercenters, dtraj, self.metric, self.n_jobs)
res = dtraj[:, None] # always return a column vector in this function
return res
def dimension(self):
"""output dimension of clustering algorithm (always 1)."""
return 1
def output_type(self):
return np.int32
def assign(self, X=None, stride=1):
"""
Assigns the given trajectory or list of trajectories to cluster centers by using the discretization defined
        by this clustering method (usually a Voronoi tessellation).
You can assign multiple times with different strides. The last result of assign will be saved and is available
as the attribute :func:`dtrajs`.
Parameters
----------
X : ndarray(T, n) or list of ndarray(T_i, n), optional, default = None
Optional input data to map, where T is the number of time steps and n is the number of dimensions.
When a list is provided they can have differently many time steps, but the number of dimensions need
to be consistent. When X is not provided, the result of assign is identical to get_output(), i.e. the
data used for clustering will be assigned. If X is given, the stride argument is not accepted.
stride : int, optional, default = 1
If set to 1, all frames of the input data will be assigned. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to obtain the discretization at a longer stride.
Note that the stride option used to conduct the clustering is independent of the assign stride.
This argument is only accepted if X is not given.
Returns
-------
Y : ndarray(T, dtype=int) or list of ndarray(T_i, dtype=int)
The discretized trajectory: int-array with the indexes of the assigned clusters, or list of such int-arrays.
If called with a list of trajectories, Y will also be a corresponding list of discrete trajectories
"""
if X is None:
# if the stride did not change and the discrete trajectory is already present,
# just return it
            if self._previous_stride == stride and len(self._dtrajs) > 0:
return self._dtrajs
self._previous_stride = stride
# map to column vectors
mapped = self.get_output(stride=stride, chunk=self.chunksize)
# flatten and save
self._dtrajs = [np.transpose(m)[0] for m in mapped]
# return
return self._dtrajs
else:
if stride != 1:
raise ValueError('assign accepts either X or stride parameters, but not both. If you want to map '+
'only a subset of your data, extract the subset yourself and pass it as X.')
# map to column vector(s)
mapped = self.transform(X)
# flatten
if isinstance(mapped, np.ndarray):
mapped = np.transpose(mapped)[0]
else:
mapped = [np.transpose(m)[0] for m in mapped]
# return
return mapped
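    # Hedged usage sketch (illustrative only, not part of the original class):
    # a fitted clustering object `cl` is typically discretized either lazily,
    #   cl.dtrajs                        # assigns all frames with stride 1
    # or explicitly,
    #   dtrajs = cl.assign(stride=10)    # list of int arrays, one per trajectory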
def save_dtrajs(self, trajfiles=None, prefix='',
output_dir='.',
output_format='ascii',
extension='.dtraj'):
"""saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
trajfiles : list of str (optional)
names of input trajectory files, will be used generate output files.
prefix : str
prepend prefix to filenames.
output_dir : str
save files to this directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
if extension[0] != '.':
extension = '.' + extension
# obtain filenames from input (if possible, reader is a featurereader)
if output_format == 'ascii':
from msmtools.dtraj import write_discrete_trajectory as write_dtraj
else:
from msmtools.dtraj import save_discrete_trajectory as write_dtraj
import os.path as path
output_files = []
if trajfiles is not None: # have filenames available?
for f in trajfiles:
p, n = path.split(f) # path and file
basename, _ = path.splitext(n)
if prefix != '':
name = "%s_%s%s" % (prefix, basename, extension)
else:
name = "%s%s" % (basename, extension)
# name = path.join(p, name)
output_files.append(name)
else:
for i in range(len(self.dtrajs)):
                if prefix != '':
name = "%s_%i%s" % (prefix, i, extension)
else:
name = str(i) + extension
output_files.append(name)
assert len(self.dtrajs) == len(output_files)
if not os.path.exists(output_dir):
mkdir_p(output_dir)
for filename, dtraj in zip(output_files, self.dtrajs):
dest = path.join(output_dir, filename)
self._logger.debug('writing dtraj to "%s"' % dest)
try:
if path.exists(dest) and not self.overwrite_dtrajs:
raise EnvironmentError('Attempted to write dtraj "%s" which already existed. To automatically'
' overwrite existing files, set source.overwrite_dtrajs=True.' % dest)
write_dtraj(dest, dtraj)
except IOError:
self._logger.exception('Exception during writing dtraj to "%s"' % dest)
| lgpl-3.0 |
dupontke/concatenated_analyses | RMSD_basic/plotting_functions.py | 1 | 8844 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# from fn_plotting.py import *
# PREAMBLE:
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
stdev = np.std
sqrt = np.sqrt
nullfmt = NullFormatter()
# ----------------------------------------
# PLOTTING SUBROUTINES
def plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = False, t0 = 0, **kwargs):
""" Creates a 1D scatter/line plot:
Usage: plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = [False|True], t0 = 0)
Arguments:
xdata, ydata: self-explanatory
color: color to be used to plot data
x_axis, y_axis: strings to be used for the axis label
system: descriptor for the system that produced the data
analysis: descriptor for the analysis that produced the data
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data
t0: index to begin averaging from; Default is 0
kwargs:
xunits, yunits: string with correct math text describing the units for the x/y data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT...
plt.plot(xdata, ydata, '%s' %(color))
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'yunits':
y_units = value
y_axis = '%s (%s)' %(y_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel(r'%s' %(x_axis), size=12)
plt.ylabel(r'%s' %(y_axis), size=12)
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
avg = np.sum(ydata[t0:])/len(ydata[t0:])
SD = stdev(ydata[t0:])
SDOM = SD/sqrt(len(ydata[t0:]))
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
plt.savefig('%s.%s.plot1d.png' %(system,analysis))
plt.close()
def hist1d(data, x_axis, system, analysis, num_b = 100, norm = False, average = False, t0 = 0, **kwargs):
""" Creates a 1D histogram:
Usage: hist1d(data, x_axis, num_b, system, analysis, norm)
Arguments:
data: self-explanatory
x_axis: string to be used for the axis label
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
num_b: number of bins to be used when binning the data; Default is 100
	norm: [False|True]; Default is False; if False, plotting a frequency of data; if True, plotting a probability density
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data
t0: index to begin averaging from; Default is 0
kwargs:
xunits: string with correct math text describing the units for the x data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT...
events, edges, patches = plt.hist(data, bins=num_b, histtype = 'bar', normed=norm)
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel(r'%s' %(x_axis), size=12)
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
avg = np.sum(data[t0:])/len(data[t0:])
SD = stdev(data[t0:])
SDOM = SD/sqrt(len(data[t0:]))
plt.axvline(avg, ymin=0.0, ymax=1.0, c='r')
plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, x_units, SD, x_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
if norm == True:
plt.ylabel('Probability Density')
plt.savefig('%s.%s.prob1d.png' %(system,analysis))
nf = open('%s.%s.prob1d.dat' %(system,analysis),'w')
else:
plt.ylabel('Frequency', size=12)
plt.savefig('%s.%s.hist1d.png' %(system,analysis))
nf = open('%s.%s.hist1d.dat' %(system,analysis), 'w')
for i in range(len(events)):
nf.write('%10.1f %10.4f\n' %(events[i], edges[i]))
plt.close()
nf.close()
events = []
edges = []
patches = []
def scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b = 100, average = False, t0 = 0, **kwargs):
""" Creates 1D scatter plot w/ a 1D histogram
Usage: scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b)
Arguments:
xdata, ydata: self-explanatory
color: color to be used to plot data
	x_axis, y_axis: strings to be printed on the axis labels
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
num_b: number of bins to be used when binning the data; Default is 100
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data
t0: index to begin averaging from; Default is 0
kwargs:
xunits, yunits: string with correct math text describing the units for the x/y data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT SIZES
left, width = 0.1, 0.65
bottom, height = 0.1, 0.8
bottom_h = left_h = left+width+0.01
rect_scatter = [left, bottom, width, height]
rect_histy = [left_h, bottom, 0.2, height]
# INITIATING THE PLOT...
plt.figure(1, figsize=(10,8))
axScatter =plt.axes(rect_scatter)
axScatter.plot(xdata, ydata, '%s.' %(color))
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'yunits':
y_units = value
y_axis = '%s (%s)' %(y_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
# plt.xlim((0,500))
plt.ylabel(r'%s' %(y_axis),size=12)
plt.xlabel(r'%s' %(x_axis),size=12)
if average != False:
avg = np.sum(ydata[t0:])/len(ydata[t0:])
SD = stdev(ydata[t0:])
SDOM = SD/sqrt(len(ydata[t0:]))
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
axHisty = plt.axes(rect_histy)
axHisty.yaxis.set_major_formatter(nullfmt)
axHisty.xaxis.set_major_formatter(nullfmt)
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
axHisty.hist(ydata, bins=num_b, orientation='horizontal', color = ['gray'])
axHisty.set_ylim(axScatter.get_ylim())
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
plt.figtext(0.775, 0.810, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
plt.savefig('%s.%s.scat_hist.png' %(system, analysis))
plt.close()
def hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm):
""" Creates a 2D histogram (heat map)
Usage: hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm)
Arguments:
xdata, ydata: self-explanatory
	x_axis, y_axis: strings to be printed on the axis labels
num_b: number of bins to be used when binning the data
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
	norm: [False|True]; if False, plotting a frequency of data; if True, plotting a probability density
"""
my_cmap = plt.cm.get_cmap('jet')
my_cmap.set_under('w')
counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins=num_b, normed=norm, cmap=my_cmap, vmin=0.001)#, cmap=plt.get_cmap('jet')) # cmap: jet (blue to red), blues (white to blue), ...
cb1 = plt.colorbar()
if norm == True:
cb1.set_label('Prob. Density', size=12)
else:
cb1.set_label('Frequency')
# plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b))
# plt.xlim((0,8))
# plt.ylim((0,8))
plt.xlabel(r'%s' %(x_axis), size=12)
plt.ylabel(r'%s' %(y_axis), size=12)
plt.savefig('%s.%s.hist2d.png' %(system, analysis))
plt.close()
counts = []
xedges = []
yedges = []
image = []
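# Minimal usage sketch (illustrative only; the data, labels and units below are
# made up and are not part of the original analysis scripts):
#   import numpy as np
#   rmsd = np.random.randn(1000) + 5.0
#   time = np.arange(len(rmsd))
#   plot_1d(time, rmsd, 'k', 'Time', 'RMSD', 'systemA', 'rmsd',
#           average=True, xunits='ns', yunits='$\\AA$')
#   hist1d(rmsd, 'RMSD', 'systemA', 'rmsd', num_b=50, norm=True,
#          average=True, xunits='$\\AA$')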
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tools/merge.py | 7 | 67927 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lrange, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, DataFrame, Series,
Index, MultiIndex, Timedelta)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.frame import _merge_doc
from pandas.types.generic import ABCSeries
from pandas.types.common import (is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_list_like,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.types.missing import na_value_for_dtype
from pandas.core.generic import NDFrame
from pandas.core.index import (_get_combined_index,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas._join as _join
import pandas.hashtable as _hash
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
return op.get_result()
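# Hedged usage sketch (not part of the original module); `df_left`/`df_right`
# are illustrative DataFrames sharing a 'key' column:
#   merged = merge(df_left, df_right, how='left', on='key',
#                  suffixes=('_l', '_r'), indicator=True)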
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
pass
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def ordered_merge(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y')):
warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
FutureWarning, stacklevel=2)
return merge_ordered(left, right, on=on,
left_on=left_on, right_on=right_on,
left_by=left_by, right_by=right_by,
fill_method=fill_method, suffixes=suffixes)
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> ordered_merge(A, B, fill_method='ffill', left_by='group')
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1
2 c 2 a 2
3 d 2 a 3
4 e 3 a 3
5 f 3 a 4
6 a 1 b NaN
7 b 1 b 1
8 c 2 b 2
9 d 2 b 3
10 e 3 b 3
11 f 3 b 4
Returns
-------
merged : DataFrame
        The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge
merge_asof
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result
ordered_merge.__doc__ = merge_ordered.__doc__
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
For each row in the left DataFrame, we select the last row in the right
DataFrame whose 'on' key is less than or equal to the left's key. Both
DataFrames must be sorted by the key.
Optionally match on equivalent keys with 'by' before searching for nearest
match with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
tolerance : integer or Timedelta, optional, default None
select asof tolerance within this range; must be compatible
        with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching the same 'on' value
(i.e. less-than-or-equal-to)
- If False, don't match the same 'on' value
          (i.e., strictly less-than)
Returns
-------
merged : DataFrame
Examples
--------
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
    For this example, we can achieve a similar result through
    ``pd.merge_ordered()``, though it's not nearly as performant.
>>> (pd.merge_ordered(left, right, on='a')
... .ffill()
... .drop_duplicates(['left_val'])
... )
a left_val right_val
0 1 a 1.0
3 5 b 3.0
6 10 c 7.0
We can use indexed DataFrames as well.
>>> left
left_val
1 a
5 b
10 c
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
    Here is a real-world time-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
    We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
See also
--------
merge
merge_ordered
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not isinstance(left, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(left)))
if not isinstance(right, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{0}'.format(type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{0}'.format(type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({0} levels on the left, {1} on the right)')
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {}".format(i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if name in result:
result[name] = key_col
else:
result.insert(i, name or 'key_%d' % i, key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_data(self):
"""
Handles overlapping column names etc.
"""
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(
ldata.items, lsuf, rdata.items, rsuf)
if not llabels.equals(ldata.items):
ldata = ldata.copy(deep=False)
ldata.set_axis(0, llabels)
if not rlabels.equals(rdata.items):
rdata = rdata.copy(deep=False)
rdata.set_axis(0, rlabels)
return ldata, rdata
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(
x, (np.ndarray, ABCSeries)) and len(x) == len(left)
is_rkey = lambda x: isinstance(
x, (np.ndarray, ABCSeries)) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(right[rk]._values)
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(right[rk]._values)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left[lk]._values)
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
if not common_cols.is_unique:
raise MergeError("Data columns not unique: %s"
% repr(common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass on OR left_on and '
'right_on')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
**kwargs):
"""
Parameters
----------
Returns
-------
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how == 'left':
kwargs['sort'] = sort
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = _join.ffill_indexer(left_indexer)
right_join_indexer = _join.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {
1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {
1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(on_type):
return getattr(_join, 'asof_join_%s' % on_type, None)
def _asof_by_function(on_type, by_type):
return getattr(_join, 'asof_join_%s_by_%s' % (on_type, by_type), None)
_type_casters = {
'int64_t': _ensure_int64,
'double': _ensure_float64,
'object': _ensure_object,
}
_cython_types = {
'uint8': 'uint8_t',
'uint32': 'uint32_t',
'uint16': 'uint16_t',
'uint64': 'uint64_t',
'int8': 'int8_t',
'int32': 'int32_t',
'int16': 'int16_t',
'int64': 'int64_t',
'float16': 'error',
'float32': 'float',
'float64': 'double',
}
def _get_cython_type(dtype):
""" Given a dtype, return a C name like 'int64_t' or 'double' """
type_name = _get_dtype(dtype).name
ctype = _cython_types.get(type_name, 'object')
if ctype == 'error':
raise MergeError('unsupported type: ' + type_name)
return ctype
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
        # we only allow 'on' to be a single key
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError('Can only pass by OR left_by '
'and right_by')
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError('missing left_by')
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
# add by to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
@property
def _asof_key(self):
""" This is our asof key, the 'on' """
return self.left_on[-1]
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys,
right_join_keys,
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
for lk, rk in zip(left_join_keys, right_join_keys):
if not is_dtype_equal(lk.dtype, rk.dtype):
raise MergeError("incompatible merge keys, "
"must be the same type")
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
lt = left_join_keys[-1]
msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_int64_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer or timestamp")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
raise MergeError("allow_exact_matches must be boolean, "
"passed {0}".format(self.allow_exact_matches))
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs):
""" unlike np.transpose, this returns an array of tuples """
labels = list(string.ascii_lowercase[:len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(lzip(*xs), labeled_dtypes)
# values to compare
left_values = (self.left.index.values if self.left_index else
self.left_join_keys[-1])
right_values = (self.right.index.values if self.right_index else
self.right_join_keys[-1])
tolerance = self.tolerance
        # we require sortedness in the join keys
msg = " keys must be sorted"
if not Index(left_values).is_monotonic:
raise ValueError('left' + msg)
if not Index(right_values).is_monotonic:
raise ValueError('right' + msg)
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view('i8')
right_values = right_values.view('i8')
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
if len(self.left_join_keys) > 2:
# get tuple representation of values if more than one
left_by_values = flip(self.left_join_keys[0:-1])
right_by_values = flip(self.right_join_keys[0:-1])
else:
left_by_values = self.left_join_keys[0]
right_by_values = self.right_join_keys[0]
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_by_function(on_type, by_type)
return func(left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance)
else:
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_function(on_type)
return func(left_values,
right_values,
self.allow_exact_matches,
tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there already was any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return _join.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = _join.left_outer_join(
_ensure_int64(left_key),
_ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = _join.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': _join.inner_join,
'left': _join.left_outer_join,
'right': _right_outer_join,
'outer': _join.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
klass = _hash.Int64Factorizer
lk = _ensure_int64(com._values_from_object(lk))
rk = _ensure_int64(com._values_from_object(rk))
else:
klass = _hash.Factorizer
lk = _ensure_object(lk)
rk = _ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
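def _demo_factorize_keys():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: shows the
    # shape of what _factorize_keys() returns on made-up inputs. Both key
    # arrays are factorized against a shared set of uniques; llab/rlab are
    # integer labels into that set and count is the number of uniques (a NA
    # group, if present, gets one extra trailing label).
    lk = np.array([1, 2, 2, 4])
    rk = np.array([2, 3])
    llab, rlab, count = _factorize_keys(lk, rk, sort=True)
    return llab, rlab, count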
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
l = len(left)
labels = np.concatenate([left, right])
_, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = _ensure_int64(new_labels)
new_left, new_right = new_labels[:l], new_labels[l:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
from pandas.core.groupby import _int64_overflow_possible
# how many levels can be done without overflow
pred = lambda i: not _int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes. Can also add a layer of hierarchical indexing on the
concatenation axis, which may be useful if the labels are the same (or
overlapping) on the passed axis number
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
copy : boolean, default True
If False, do not copy data unnecessarily
Notes
-----
The keys, levels, and names arguments are all optional
Returns
-------
concatenated : type of objects
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
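def _demo_concat():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: minimal
    # calls to concat() defined above, on made-up frames.
    df1 = DataFrame({'a': [1, 2], 'b': [3, 4]})
    df2 = DataFrame({'a': [5, 6], 'b': [7, 8]})
    # stack rows; ignore_index relabels the result 0..n-1
    stacked = concat([df1, df2], ignore_index=True)
    # keys add an outer level of a hierarchical index along the concat axis
    keyed = concat([df1, df2], keys=['first', 'second'])
    return stacked, keyed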
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = [obj for obj in objs if obj is not None]
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
raise TypeError("cannot concatenate a non-NDFrame object")
# consolidate
obj.consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
        # want the highest ndim that we have, and must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
            # filter out the empties if we have no multi-index possibilities
            # note: keep empty Series as it affects the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and join_axes is None)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
# concat Series with length to keep dtype as much
non_empties = [x for x in self.objs if len(x) > 0]
if len(non_empties) > 0:
values = [x._values for x in non_empties]
else:
values = [x._values for x in self.objs]
new_data = _concat._concat_compat(values)
name = com._consensus_name_attr(self.objs)
cons = _concat._get_series_result_type(new_data)
return (cons(new_data, index=self.new_axes[0],
name=name, dtype=new_data.dtype)
.__finalize__(self, method='concat'))
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be "
"equal to {0}".format(ndim - 1))
# ufff...
indices = lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
if self._is_series:
all_indexes = [x.index for x in self.objs]
else:
try:
all_indexes = [x._data.axes[i] for x in self.objs]
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
return _get_combined_index(all_indexes, intersect=self.intersect)
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([idx.nlevels for idx in indexes])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
def _should_fill(lname, rname):
if (not isinstance(lname, compat.string_types) or
not isinstance(rname, compat.string_types)):
return True
return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| mit |
khkaminska/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
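def _demo_norms():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: norm() and
    # squared_norm() above agree with the usual Euclidean norm on a made-up
    # vector ([3, 4] has norm 5 and squared norm 25).
    x = np.array([3.0, 4.0])
    assert abs(norm(x) - 5.0) < 1e-12
    assert abs(squared_norm(x) - 25.0) < 1e-12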
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
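def _demo_row_norms():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: row-wise
    # Euclidean norms of a small made-up dense matrix; CSR input is also
    # supported by row_norms() above.
    X = np.array([[3.0, 4.0], [0.0, 1.0]])
    dense = row_norms(X)                  # array([ 5.,  1.])
    squared = row_norms(X, squared=True)  # array([ 25.,   1.])
    return dense, squared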
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
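def _demo_fast_logdet():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: the log
    # determinant of the identity is 0, and a matrix with non-positive
    # determinant yields -inf.
    assert abs(fast_logdet(np.eye(3))) < 1e-12
    assert fast_logdet(-np.eye(3)) == -np.inf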
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid extra copies that `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
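def _demo_density():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: the
    # fraction of non-zero entries in a made-up dense vector (here 2 of 4).
    w = np.array([0.0, 1.0, 0.0, 2.0])
    return density(w)  # 0.5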
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
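def _demo_safe_sparse_dot():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: a mixed
    # sparse/dense product on made-up data; dense_output=True asks for an
    # ndarray result when the product would otherwise stay sparse.
    from scipy import sparse
    a = sparse.csr_matrix(np.eye(3))
    b = np.arange(9.0).reshape(3, 3)
    return safe_sparse_dot(a, b, dense_output=True)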
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
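def _demo_randomized_svd():  # pragma: no cover
    # Editor's illustrative sketch, not part of the original module: a rank-2
    # truncated decomposition of a small made-up low-rank matrix. Shapes follow
    # the truncated SVD convention: U (n_samples, k), s (k,), V (k, n_features).
    rng = np.random.RandomState(0)
    M = np.dot(rng.randn(50, 2), rng.randn(2, 30))  # exactly rank 2
    U, s, V = randomized_svd(M, n_components=2, n_iter=3, random_state=0)
    return U.shape, s.shape, V.shape                # ((50, 2), (2,), (2, 30))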
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Eigenvalues smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
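# --- Added usage sketch (not part of the original module) ----------------------
# svd_flip only changes signs, so the reconstruction np.dot(u * s, v) is
# unchanged while the output becomes deterministic. `_demo_svd_flip` is
# illustrative only.
def _demo_svd_flip():
    import numpy as np
    rng = np.random.RandomState(42)
    A = rng.randn(6, 4)
    u, s, v = np.linalg.svd(A, full_matrices=False)
    u2, v2 = svd_flip(u.copy(), v.copy())
    # signs may differ column-wise, but the product is identical
    assert np.allclose(np.dot(u * s, v), np.dot(u2 * s, v2))
    # the largest-magnitude loading in every column of u2 is now non-negative
    assert np.all(u2[np.argmax(np.abs(u2), axis=0), range(u2.shape[1])] >= 0)
    return u2, v2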
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
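# --- Added usage sketch (not part of the original module) ----------------------
# The split formulation keeps log_logistic finite where the naive expression
# log(1 / (1 + exp(-x))) over/underflows. `_demo_log_logistic` is illustrative.
def _demo_log_logistic():
    import numpy as np
    x = np.array([-1000., 0., 1000.])
    naive = np.log(1.0 / (1.0 + np.exp(-x)))   # gives -inf at x = -1000
    stable = log_logistic(x)                   # approx [-1000, -log(2), 0]
    return naive, stable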
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
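# --- Added usage sketch (not part of the original module) ----------------------
# Row-wise softmax on values large enough to overflow exp() directly; the
# max-subtraction keeps the result finite and each row sums to 1.
# `_demo_softmax` is illustrative only.
def _demo_softmax():
    import numpy as np
    X = np.array([[1000., 1001.], [3., 1.]])
    P = softmax(X)                        # copy=True leaves X untouched
    assert np.allclose(P.sum(axis=1), 1.0)
    return P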
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
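# --- Added usage sketch (not part of the original module) ----------------------
# Verifies the incremental Chan/Golub/LeVeque update against a direct
# computation on the concatenated data; the first batch seeds old_mean /
# old_variance / old_sample_count, and the count is passed as a float so the
# update's divisions stay exact under Python 2 as well.
# `_demo_batch_mean_variance_update` is illustrative only.
def _demo_batch_mean_variance_update():
    import numpy as np
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(50, 3), rng.randn(30, 3)
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), float(X1.shape[0]))
    X = np.vstack([X1, X2])
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    assert count == X.shape[0]
    return mean, var, count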
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/io/pickle.py | 6 | 3578 | """ pickle compat """
import numpy as np
from numpy.lib.format import read_array, write_array
from pandas.compat import BytesIO, cPickle as pkl, pickle_compat as pc, PY3
from pandas.core.dtypes.common import is_datetime64_dtype, _NS_DTYPE
from pandas.io.common import _get_handle, _infer_compression
def to_pickle(obj, path, compression='infer'):
"""
Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', None}, default 'infer'
a string representing the compression to use in the output file
.. versionadded:: 0.20.0
"""
inferred_compression = _infer_compression(path, compression)
f, fh = _get_handle(path, 'wb',
compression=inferred_compression,
is_text=False)
try:
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
finally:
for _f in fh:
_f.close()
def read_pickle(path, compression='infer'):
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', 'zip', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, xz or zip if path is a string ending in '.gz', '.bz2', '.xz',
        or '.zip' respectively, and no decompression otherwise.
Set to None for no decompression.
.. versionadded:: 0.20.0
Returns
-------
unpickled : type of object stored in file
"""
inferred_compression = _infer_compression(path, compression)
def read_wrapper(func):
# wrapper file handle open/close operation
f, fh = _get_handle(path, 'rb',
compression=inferred_compression,
is_text=False)
try:
return func(f)
finally:
for _f in fh:
_f.close()
def try_read(path, encoding=None):
# try with cPickle
# try with current pickle, if we have a Type Error then
# try with the compat pickle to handle subclass changes
# pass encoding only if its not None as py2 doesn't handle
# the param
# cpickle
# GH 6899
try:
return read_wrapper(lambda f: pkl.load(f))
except Exception:
# reg/patched pickle
try:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=False))
# compat pickle
except:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=True))
try:
return try_read(path)
except:
if PY3:
return try_read(path, encoding='latin1')
raise
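# --- Added usage sketch (not part of the original file) -------------------------
# Round-trips a small DataFrame through to_pickle/read_pickle with compression
# inferred from the '.gz' suffix. The temporary path handling below is
# illustrative only.
def _demo_pickle_roundtrip():
    import os
    import tempfile
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    path = os.path.join(tempfile.mkdtemp(), 'frame.pkl.gz')
    to_pickle(df, path, compression='infer')       # '.gz' -> gzip
    roundtripped = read_pickle(path, compression='infer')
    assert df.equals(roundtripped)
    return roundtripped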
# compat with sparse pickle / unpickle
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
# All datetimes should be stored as M8[ns]. When unpickling with
# numpy1.6, it will read these as M8[us]. So this ensures all
# datetime64 types are read as MS[ns]
if is_datetime64_dtype(arr):
arr = arr.view(_NS_DTYPE)
return arr
| mit |
simudream/dask | dask/dataframe/tests/test_utils_dataframe.py | 11 | 1064 | import pandas as pd
from dask.dataframe.utils import (shard_df_on_index, get_categories,
_categorize, strip_categories)
import pandas.util.testing as tm
def test_shard_df_on_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
s = df.y
result = list(shard_df_on_index(df, [20, 50]))
assert list(result[0].index) == [10]
assert list(result[1].index) == [20, 30, 40]
assert list(result[2].index) == [50, 60]
def test_categories():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])},
index=pd.CategoricalIndex(['x', 'x', 'y', 'y']))
categories = get_categories(df)
assert set(categories.keys()) == set(['y', '.index'])
assert list(categories['y']) == ['a', 'b', 'c']
assert list(categories['.index']) == ['x', 'y']
df2 = strip_categories(df)
assert not get_categories(df2)
df3 = _categorize(categories, df2)
tm.assert_frame_equal(df, df3)
| bsd-3-clause |
nfoti/StarCluster | starcluster/balancers/sge/visualizer.py | 18 | 2911 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
"""
StarCluster SunGridEngine stats visualizer module
"""
import os
import numpy as np
from datetime import datetime
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from starcluster.logger import log
class SGEVisualizer(object):
"""
Stats Visualizer for SGE Load Balancer
stats_file - file containing SGE load balancer stats
pngpath - directory to dump the stat plots to
"""
def __init__(self, stats_file, pngpath):
self.pngpath = pngpath
self.stats_file = stats_file
self.records = None
def read(self):
        rows = []
        fh = open(self.stats_file, 'r')
        for line in fh:
            parts = line.rstrip().split(',')
            a = [datetime.strptime(parts[0], '%Y-%m-%d %H:%M:%S.%f'),
                 int(parts[1]), int(parts[2]), int(parts[3]), int(parts[4]),
                 int(parts[5]), int(parts[6]), float(parts[7])]
            rows.append(a)
        fh.close()
        names = ['dt', 'hosts', 'running_jobs', 'queued_jobs',
                 'slots', 'avg_duration', 'avg_wait', 'avg_load']
        self.records = np.rec.fromrecords(rows, names=','.join(names))
def graph(self, yaxis, title):
if self.records is None:
log.error("ERROR: File hasn't been read() yet.")
return -1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.records.dt, yaxis)
ax.grid(True)
fig.autofmt_xdate()
filename = os.path.join(self.pngpath, title + '.png')
plt.savefig(filename, dpi=100)
log.debug("saved graph %s." % title)
plt.close(fig) # close it when its done
def graph_all(self):
self.read()
vals = {'queued': self.records.queued_jobs,
'running': self.records.running_jobs,
'num_hosts': self.records.hosts,
# 'slots': self.records.slots,
'avg_duration': self.records.avg_duration,
'avg_wait': self.records.avg_wait,
'avg_load': self.records.avg_load}
for sub in vals:
self.graph(vals[sub], sub)
log.info("Done making graphs.")
| lgpl-3.0 |
scholer/na_strand_model | nascent/graph_sim_nx/stats_manager.py | 2 | 28170 | # -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, [email protected]
##
## This file is part of Nascent.
##
## Nascent is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=C0103,W0142
"""
Formats:
* text/csv
* json
* yaml
* msgpack
"""
from __future__ import absolute_import, print_function, division
import os
import pdb
from collections import Counter, defaultdict
from itertools import chain
import yaml
import networkx as nx
from .constants import I_DH, I_DS, I_HYBRIDIZATION, I_STACKING, I_LOOP, I_VOLUME
try:
import msgpack
except ImportError as e:
print("Error importing msgpack library: %s" % e)
print(" - msgpack output not available to stats writer.")
# from nascent.graph_sim_nx.reactionmgr import ReactionAttrs
from .nx_utils import draw_graph_and_save, layout_graph
def simplify(data, _list=list, _set=set):
if isinstance(data, dict):
# cannot simplify if k is frozenset... :(
return {simplify(k, _list=tuple, _set=frozenset): simplify(v, _list=_list, _set=_set)
for k, v in data.items()}
if isinstance(data, (tuple, list)):
return _list(simplify(v, _list=_list, _set=_set) for v in data)
if isinstance(data, (set, frozenset)):
return _set(simplify(v, _list=tuple, _set=frozenset) for v in data)
return str(data)
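# --- Added usage sketch (not part of the original module) ----------------------
# simplify() makes nested data YAML/msgpack-friendly: dict keys become hashable
# tuples/frozensets, containers keep their kind, and leaves are stringified.
# `_demo_simplify` is illustrative only.
def _demo_simplify():
    out = simplify({('x', 1): [2, {3}]})
    assert out == {('x', '1'): ['2', set(['3'])]}
    return out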
def load_complex_state_count(fn):
    with open(fn, 'rb') as fp:
data = list(msgpack.Unpacker(fp, encoding='utf-8'))
return data
class StatsWriter():
"""
TODO: Implement support for msgpack format.
"""
def __init__(self, sysmgr, simulator, config):
self.sysmgr = sysmgr
self.simulator = simulator
self.config = config
self.open_files = []
self.monitored_strands = [] # Add strands to monitor throughout the simulation.
self.stats_field_sep = config.get('stats_field_sep', '\t')
self.stats_field_sep2 = config.get('stats_field_sep2', ', ')
# Use standard fields,
default_totals_fields = [
'tau', 'system_time', 'temperature',
'N_domains', 'n_hybridizable_domains', 'n_hybridized_domains',
'N_strands', 'n_partially_hybridized_strands', 'n_fully_hybridized_strands',
'n_stacked_ends',
'n_complexes'
]
self.stats_total_fields = config.get('stats_total_fields', default_totals_fields)
# Or use a custom line format:
# self.stats_total_fmt = config.get('stats_total_fmt')
self.stats_per_domain_sysfields = config.get('stats_per_domain_sysfields',
['tau', 'system_time', 'temperature'])
self.stats_per_domain_fields = config.get('stats_per_domain_fields',
['n_total', 'n_hybridized'])
self.stats_per_domain_species = config.get('stats_per_domain_species',
list(self.sysmgr.domains_by_name.keys()))
if self.stats_per_domain_species == 'hybridizable':
self.stats_per_domain_species = list(self.sysmgr.domain_pairs.keys())
self.stats_per_strand_species = config.get('stats_per_strand_species',
list(sysmgr.strands_by_name.keys()))
self.stats_per_strand_fields = config.get('stats_per_strand_fields',
['n_total', 'n_hybridized', 'n_fully_hybridized'])
self.stats_per_strand_sysfields = config.get('stats_per_strand_sysfields',
['tau', 'system_time', 'temperature'])
## General "total" stats (for each step, file is kept open)
self.stats_total_file = config.get('stats_total_file')
if self.stats_total_file:
print("Writing stats_total_file to file:", self.stats_total_file)
self.stats_total_file = open(self.stats_total_file, 'w')
header = self.stats_field_sep.join(self.stats_total_fields)
self.stats_total_file.write(header+"\n")
## Per-domain stats (for each step, file is kept open)
self.stats_per_domain_file = config.get('stats_per_domain_file')
if self.stats_per_domain_file:
print("Writing stats_per_domain_file to file:", self.stats_per_domain_file)
self.stats_per_domain_file = fh = open(self.stats_per_domain_file, 'w')
self.open_files.append(fh)
# Has two headers, the first is for the "outer" fields: [sys-specs, domA, domB, ...]
# The second header line is for the "inner" fields: [tau, system_time, ...], [n_hybridized, ...]
header1 = self.stats_field_sep.join(["sysfields"] + self.stats_per_domain_species)
header2 = self.stats_field_sep2.join(self.stats_per_domain_sysfields +
self.stats_per_domain_fields*len(self.stats_per_domain_species))
self.stats_per_domain_file.write(header1+"\n"+header2+"\n")
## Per-strand stats (for each step)
self.stats_per_strand_file = config.get('stats_per_strand_file')
if self.stats_per_strand_file:
print("Writing stats_per_strand_file to file:", self.stats_per_strand_file)
self.stats_per_strand_file = fh = open(self.stats_per_strand_file, 'w')
self.open_files.append(fh)
# Has two headers, the first is for the "outer" fields: [sys-specs, domA, domB, ...]
# The second header line is for the "inner" fields: [tau, system_time, ...], [n_hybridized, ...]
header1 = self.stats_field_sep.join(["sysfields"] + self.stats_per_strand_species)
header2 = self.stats_field_sep2.join(self.stats_per_strand_sysfields +
self.stats_per_strand_fields*len(self.stats_per_strand_species))
self.stats_per_strand_file.write(header1+"\n"+header2+"\n")
## Collect complex_state_count (for each step) - using msgpack format
self.complex_state_count_file = config.get('stats_complex_state_count_file')
if self.complex_state_count_file:
self.complex_state_count_file = open(self.complex_state_count_file, 'wb') # msgpack - open in binary mode
self.open_files.append(self.complex_state_count_file)
## File to collect stats for monitored strands:
self.stats_monitored_strands_file = config.get('stats_monitored_strands_file')
# grouping-fields, then index field, then weight (tau), then values or secondary group fields (?)
monitored_strands_fields = ("strand_uid, strand_name, system_time, tau, "
"complex_uid, complex_state, N_domains, N_hybridized_domains").split(", ")
if self.stats_monitored_strands_file:
print("Writing monitored strands stats file:", self.stats_monitored_strands_file)
self.stats_monitored_strands_file = fh = open(self.stats_monitored_strands_file, 'w')
self.open_files.append(fh)
header = self.stats_field_sep.join(monitored_strands_fields)
self.stats_monitored_strands_file.write(header+"\n")
## File to collect complex states:
self.stats_complex_state_file = config.get('stats_complex_state_file')
complex_state_fields = [
"complex_uid", "system_time", "tau", "reaction_str",
#"cached_state_hash", # Is always None
"state_hash", # Obtained with cmplx.state_fingerprint()
#"cached_state_hash2", # Same as state_hash
"N_strands", "N_domains", "N_hybridized_pairs", "N_stacked_pairs",
'total_dH', 'total_dS', 'volume_dH', 'volume_dS', 'shape_dH', 'shape_dS',
'hybridization_dH', 'hybridization_dS', 'stacking_dH', 'stacking_dS'
]
if self.stats_complex_state_file:
print("Writing stats to stats_complex_state_file:", self.stats_complex_state_file)
self.stats_complex_state_file = fh = open(self.stats_complex_state_file, 'w')
self.open_files.append(fh)
header = self.stats_field_sep.join(complex_state_fields)
self.stats_complex_state_file.write(header+"\n")
#### Post-simulation stats: ####
## Collected to a single file using msgpack, each entry is a single end-of-simulation dict.
self.stats_post_simulation_file = config.get('stats_post_simulation_file')
## Reaction graph
self.reaction_graph_output_directory = config.get('reaction_graph_output_directory')
self.reaction_graph_output_fnfmt = config.get('reaction_graph_output_fnfmt')
self.reaction_graph_output_formats = config.get('reaction_graph_output_formats')
if isinstance(self.reaction_graph_output_formats, str):
# Ensure that is is a list/tuple:
self.reaction_graph_output_formats = [self.reaction_graph_output_formats]
def close_all(self):
""" Explicitly close all open files. """
for fh in self.open_files:
fh.close()
def write_stats(self, tau, reaction_attr=None, result=None):
""" Write all stats to all file. """
## TODO: Consider including reaction_spec_pair, reaction_attr (for the *next* or the *previous* reaction?)
if self.stats_total_file is not None:
self.write_total_stats(tau)
if self.stats_per_domain_file is not None:
self.write_per_domain_stats(tau)
if self.stats_per_strand_file is not None:
self.write_per_strand_stats(tau)
if self.stats_monitored_strands_file is not None and self.monitored_strands:
self.write_monitored_strands_stats(tau)
# write_complex_state_stats currently works by writing state changes...
if self.stats_complex_state_file and result is not None:
self.write_complex_state_stats(tau, result, reaction_attr)
def write_total_stats(self, tau, fields=None, init_stats=None):
"""
Write system stats to file.
"""
if fields is None:
fields = self.stats_total_fields
sysmgr = self.sysmgr
# simulator = self.simulator
if init_stats is None:
stats_total = {}
else:
stats_total = init_stats.copy()
## Collect data to a single stats line dict:
stats_total['tau'] = tau
## System/reaction/component manager stats:
for attr in ('system_time', 'temperature', 'N_domains', 'N_strands'):
stats_total[attr] = getattr(sysmgr, attr)
for getter in ('n_hybridized_domains',
'n_hybridizable_domains',
'n_stacked_ends',
'n_partially_hybridized_strands',
'n_fully_hybridized_strands'):
stats_total[getter] = getattr(sysmgr, getter)()
stats_total['n_complexes'] = len(sysmgr.complexes)
## Write data to file:
line = self.stats_field_sep.join(str(stats_total[field]) for field in fields)
self.stats_total_file.write(line + "\n")
def write_per_domain_stats(self, tau, sysfields=None, fields=None, species=None, init_stats=None):
"""
Will first write constant fields, then domain species.
system_time, temperature, N_domains, n_hybridized_domains, N_strands, ...
"""
if fields is None:
sysfields = self.stats_per_domain_sysfields
if fields is None:
fields = self.stats_per_domain_fields
if species is None:
species = self.stats_per_domain_species
sysmgr = self.sysmgr
if init_stats is None:
stats_total = {}
else:
stats_total = init_stats.copy()
## Collect data to a single stats line dict:
stats_total['tau'] = tau
## System/reaction/component manager stats:
for attr in ('system_time', 'temperature', 'N_domains'):
stats_total[attr] = getattr(sysmgr, attr)
## Collect per-domain (specie) stats:
domain_stats = {}
for name, domains in sysmgr.domains_by_name.items():
domain_stats[name] = {}
domain_stats[name]['n_total'] = len(domains)
domain_stats[name]['n_hybridized'] = sum(1 for domain in domains if domain.partner is not None)
## Write data to file:
line = self.stats_field_sep.join(
[self.stats_field_sep2.join([str(stats_total[field]) for field in sysfields])]+
[self.stats_field_sep2.join([str(domain_stats[name][field]) for field in fields])
for name in self.stats_per_domain_species])
self.stats_per_domain_file.write(line + "\n")
def write_per_strand_stats(self, tau, sysfields=None, fields=None, species=None, init_stats=None):
"""
Will first write constant fields, then domain species.
system_time, temperature, N_domains, n_hybridized_domains, N_strands, ...
"""
if fields is None:
sysfields = self.stats_per_domain_sysfields
if fields is None:
fields = self.stats_per_domain_fields
if species is None:
species = self.stats_per_strand_species
sysmgr = self.sysmgr
if init_stats is None:
stats_total = {}
else:
stats_total = init_stats.copy()
## Collect data to a single stats line dict:
stats_total['tau'] = tau
## System/reaction/component manager stats:
for attr in ('system_time', 'temperature', 'N_domains'):
stats_total[attr] = getattr(sysmgr, attr)
## Collect per-domain (specie) stats:
strand_stats = {}
for name, strands in sysmgr.strands_by_name.items():
strand_stats[name] = {}
strand_stats[name]['n_total'] = len(strands)
strand_stats[name]['n_hybridized'] = sum(1 for strand in strands if strand.is_hybridized())
strand_stats[name]['n_fully_hybridized'] = sum(1 for strand in strands if strand.is_fully_hybridized())
## Write data to file:
# tau, system_time, temperature \t
line = self.stats_field_sep.join(
[self.stats_field_sep2.join([str(stats_total[field]) for field in sysfields])]+
[self.stats_field_sep2.join([str(strand_stats[name][field]) for field in fields])
for name in self.stats_per_strand_species])
self.stats_per_strand_file.write(line + "\n")
#pdb.set_trace()
def write_complex_state_count(self, ):
"""
What to record?
For each complex:
- N_strands, N_domains, N_domains_hybridized
- Complex state
Globally:
- Complex state encounters (only register changes).
- Complex states at time t.
"""
sysmgr = self.sysmgr
complex_state_count = dict(Counter([c.state_fingerprint() for c in sysmgr.complexes]))
        msgpack.pack(complex_state_count, self.complex_state_count_file)
def write_monitored_strands_stats(self, tau):
"""
Args:
:result: The result dict with case and changed complexes from react_and_process method.
Will, for each monitored strand, write a line with:
strand_uid, strand_name, system_time, tau, complex_uid, complex_state, N_domains, N_hybridized_domains
This should be easy. The tricky part is parsing the result...
You will have to create a table *for each complex* before you start calculating the duration
of each complex state.
To read the data, simply use something like:
pandas.read_table(filename)
"""
system_time = self.sysmgr.system_time
for strand in self.monitored_strands:
cmplx = strand.complex
cuid, cstate = (-1, 0) if cmplx is None else (cmplx.cuid, cmplx.state_fingerprint())
line = self.stats_field_sep.join((#"%s" % val for val in (
"%s" % strand.suid, strand.name,
"%0.05f" % system_time, "%0.04e" % tau, #reaction_str,
"%s" % cuid, "%s" % cstate,
"%s" % len(strand.domains), "%s" % sum(1 for d in strand.domains if d.partner is not None)
))
self.stats_monitored_strands_file.write(line+"\n")
def write_complex_state_stats(self, tau, result, reaction_attr):
"""
Args:
:result: The result dict with case and changed complexes from react_and_process method.
Will, for each complex, write a line with:
<complex uuid>, system_time, N_strands, N_hybridized_domains, N_stacked_domains,
and all fields from complex.energy_subtotals
This should be easy. The tricky part is parsing the result...
You will have to create a table *for each complex* before you start calculating the duration
of each complex state.
"""
system_time = self.sysmgr.system_time
if result['changed_complexes'] is not None and result['new_complexes'] is not None:
changed_complexes = result['changed_complexes'] + result['new_complexes']
elif result['changed_complexes'] is not None:
changed_complexes = result['changed_complexes']
elif result['new_complexes'] is not None:
changed_complexes = result['new_complexes']
else:
return
reaction_attr_str = (reaction_attr.reaction_type + ("+" if reaction_attr.is_forming else "-")
+ ("*" if reaction_attr.is_intra else " "))
for cmplx in changed_complexes:
## in ReactionMgr we track N_domains_hybridized explicitly, but not for complexes.
## Using %s string interpolation seems to be the fastest way to produce strings:
line = self.stats_field_sep.join((
"%s" % cmplx.cuid, "%0.04e" % system_time, "%0.04e" % tau,
reaction_attr_str,
# cmplx._state_fingerprint,
"%s" % cmplx.state_fingerprint(),
# cmplx._state_fingerprint,
"%s" % len(cmplx.strands), "%s" % len(list(cmplx.domains())),
"%s" % len(cmplx.hybridized_pairs), "%s" % len(list(cmplx.stacked_pairs)),
"%0.04f" % cmplx.energy_total_dHdS[0], "%0.04f" % cmplx.energy_total_dHdS[1],
"%0.02f" % cmplx.energy_subtotals[I_VOLUME][0], "%0.03f" % cmplx.energy_subtotals[I_VOLUME][1],
"%0.02f" % cmplx.energy_subtotals[I_LOOP][0], "%0.03f" % cmplx.energy_subtotals[I_LOOP][1],
"%0.02f" % cmplx.energy_subtotals[I_HYBRIDIZATION][0], "%0.03f" % cmplx.energy_subtotals[I_HYBRIDIZATION][1],
"%0.02f" % cmplx.energy_subtotals[I_STACKING][0], "%0.03f" % cmplx.energy_subtotals[I_STACKING][1],
))
self.stats_complex_state_file.write(line+"\n")
def write_post_simulation_stats(self, fnpostfix=""):
"""
Append a dict with stats:
sysmgr_cache, reaction_attrs, reaction_throttle_cache, reaction_invocation_count
to post_simulation_stats.yaml file.
Note that I write the stats by appending the stats dict as:
yaml.dump([stats])
Doing this, I can append [stats] multiple times to the same file
and still get a readable yaml-formatted list.
(I typically append [stats] before and after a simulation.)
"""
if self.stats_post_simulation_file is None:
print("statsmgr.stats_post_simulation_file is None, cannot collect post simulation stats...")
return
sysmgr = self.sysmgr
# systime = sysmgr.system_time
stats = {}
# remove defaultdict, etc:
stats['reaction_throttle_cache'] = sysmgr.reaction_throttle_cache
# ReactionMgr.cache has: domain_hybridization_energy, intracomplex_activity, stochastic_rate_constant,
stats['sysmgr_cache'] = sysmgr.cache
stats['reaction_attrs'] = sysmgr.reaction_attrs
stats['reaction_spec_pairs'] = sysmgr.reaction_spec_pairs
stats['reaction_invocation_count'] = sysmgr.reaction_invocation_count
stats['possible_hybridization_reactions'] = sysmgr.possible_hybridization_reactions
stats['possible_stacking_reactions'] = sysmgr.possible_stacking_reactions
stats = simplify(stats)
fn = self.stats_post_simulation_file.format(
fnpostfix=fnpostfix,
system_time=sysmgr.system_time, T=sysmgr.temperature)
with open(fn, 'a') as fp:
# dump yaml as list to make it easy to append.
# otherwise, use msgpack format.
before = fp.tell()
yaml.dump([stats], fp)
n_bytes = fp.tell() - before
print("\nwrite_post_simulation_stats:", n_bytes, "bytes written to file", self.stats_post_simulation_file)
def save_reaction_graph(self, **kwargs):
"""
Save sysmgr.reaction_graph to file.
See also ReactionMgr.save_reaction_graph (although this one actually saves all system graphs,
not just the reaction graph.)
And:
This method saves in more formats..
This method has different filename formatting, uses ReactionMgr.reaction_graph_output_fnfmt
"""
# self.reaction_graph_output_directory = config.get('reaction_graph_output_directory')
# self.reaction_graph_output_fnfmt = config.get('reaction_graph_output_fnfmt')
# self.reaction_graph_output_formats = config.get('reaction_graph_output_formats')
g = self.sysmgr.reaction_graph
systime = self.sysmgr.system_time
output_funcs = {method: getattr(nx, "write_"+method)
for method in ("yaml", "edgelist", "adjlist", "multiline_adjlist", "gexf", "pajek")}
# output_funcs['png'] = draw_graph_and_save # save reaction graph to png using e.g. graphviz
if not os.path.exists(self.reaction_graph_output_directory):
os.makedirs(self.reaction_graph_output_directory)
if not os.path.isdir(self.reaction_graph_output_directory):
print("Warning: output dir %s is not a director!" % self.reaction_graph_output_directory)
for ext in self.reaction_graph_output_formats:
path = os.path.join(self.reaction_graph_output_directory,
self.reaction_graph_output_fnfmt.format(ext=ext, systime=systime, **kwargs))
# e.g. nx.write_gexf(g, path) or draw_graph_and_save(g, path)
try:
output_funcs[ext](g, path)
except Exception as e:
print("\nStatsManager: Error saving reaction graph using %r(%r, %r)" % (output_funcs[ext], g, path))
print(" - exception type and msg:", type(e), e)
class StatsReader():
def __init__(self, config):
if config is None:
config = {}
self.config = config
self.stats_field_sep = config.get('stats_field_sep', '\t')
self.stats_field_sep2 = config.get('stats_field_sep2', ', ')
def load_total_stats_file(self, fn):
"""
Then the fields header:
tau, system_time, temperature, N_domains, n_hybridized_domains, ...
Returns a list of stats-dicts
"""
with open(fn) as fp:
headers = next(fp).strip().split(self.stats_field_sep)
rows = ([float(val) for val in line.strip().split(self.stats_field_sep)] for line in fp)
stats = [dict(zip(headers, row)) for row in rows]
return stats
def load_per_domain_stats_file(self, fn):
"""
First line is a super-header spec:
# sysspecs, domainA, domainB, ... domain names.
Then the fields header:
        tau, system_time, temperature \t n_total, n_hybridized, n_fully_hybridized \t (continue for next domain)
The first three fields before the tab specifies the system state.
The last three fields are "per-strand" stats field.
Next line is values corresponding to the header.
Returns a list of (system-dict, {domain-name: dom-stats-dict}) tuples.
"""
with open(fn) as fp:
first_row = next(fp).strip().split(self.stats_field_sep)
names = first_row[1:]
second_row = next(fp).strip().split(self.stats_field_sep)
# [[tau, system_time, temperature], [n_total, n_hybridized, n_fully_hubridized], [...], ...]
headers = [substr.split(self.stats_field_sep2) for substr in second_row]
rows = ([[float(val) for val in substr.split(self.stats_field_sep2)]
for substr in line.strip().split(self.stats_field_sep)]
for line in fp)
# each row is like: [[0.2, 1.2, 330], [10, 5, 2], [...], ...]
#sysstats, *domainstats =
stats = [(dict(zip(headers[0], row[0])),
{name: dict(zip(headers[1:], domvalues)) for name, domvalues in zip(names, row[1:])})
for row in rows]
return stats
def load_per_strands_stats_file(self, fn):
"""
First line is a super-header spec:
# sysspecs, domainA, domainB, ... domain names.
Then the fields header:
        tau, system_time, temperature \t n_total, n_hybridized, n_fully_hybridized \t (continue for next domain)
The first three fields before the tab specifies the system state.
The last three fields are "per-strand" stats field.
        Next line is values corresponding to the header.
"""
with open(fn) as fp:
first_row = next(fp).strip().split(self.stats_field_sep)
names = first_row[1:]
second_row = next(fp).strip().split(self.stats_field_sep)
# [[tau, system_time, temperature], [n_total, n_hybridized, n_fully_hubridized], [...], ...]
headers = [substr.split(self.stats_field_sep2) for substr in second_row]
rows = ([[float(val) for val in substr.split(self.stats_field_sep2)]
for substr in line.strip().split(self.stats_field_sep)]
for line in fp)
# each row is like: [[0.2, 1.2, 330], [10, 5, 2], [...], ...]
#sysstats, *domainstats =
stats = [(dict(zip(headers[0], row[0])),
{name: dict(zip(headers[1:], domvalues)) for name, domvalues in zip(names, row[1:])})
for row in rows]
return stats
def load_complex_state_stats(self, fn):
"""
returns
stats, stats_by_cuid
Where stats is a list of dicts with all entries
and stats_by_cuid is the entries grouped by ComplexID.
(This grouping could probably also be done by Pandas and a filter...)
"""
with open(fn) as fp:
headers = next(fp).strip().split(self.stats_field_sep)
rows = ([float(val) for val in line.strip().split(self.stats_field_sep)] for line in fp)
stats = [dict(zip(headers, row)) for row in rows]
stats_by_cuid = defaultdict(list)
# This could probably also be done by Pandas and a filter...
for stat in stats:
stats_by_cuid[stat['ComplexID']].append(stat)
return stats, stats_by_cuid
| gpl-3.0 |
orion-42/numerics-physics-stuff | SOR.py | 1 | 2238 | from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from numba import jit
Electrode = namedtuple("Electrode", ["x", "y", "r", "V"])
@jit
def setup(electrodes, dx, N):
is_boundary = np.empty((N, N), dtype="bool")
V = np.empty((N, N))
for i in range(N):
for j in range(N):
boundary_value = None
            if i == 0 or j == 0 or i == N - 1 or j == N - 1:
is_boundary[i][j] = True
boundary_value = 0.0
else:
is_boundary[i][j] = False
for e in electrodes:
x = i * dx
y = j * dx
if (x - e.x)**2 + (y - e.y)**2 < e.r**2:
is_boundary[i][j] = True
boundary_value = e.V
break
V[i][j] = boundary_value if is_boundary[i][j] else 0
return V, is_boundary
@jit
def SOR(V, is_boundary, eps, alpha, dx):
N = V.shape[0]
steps = 0
while True:
for i in range(1, N - 1):
for j in range(1, N - 1):
if not is_boundary[i][j]:
V_star = (V[i + 1][j] + V[i - 1][j] + V[i][j - 1] + V[i][j + 1]) / 4.0
Delta_V = V_star - V[i][j]
V[i][j] += alpha * Delta_V
bad = False
for i in range(1, N - 1):
for j in range(1, N - 1):
if not is_boundary[i][j]:
laplace = (V[i - 1][j] - 2 * V[i][j] + V[i + 1][j]) / dx**2 + (V[i][j - 1] - 2 * V[i][j] + V[i][j + 1]) / dx**2
if abs(laplace) >= eps:
bad = True
break
if bad: break
steps += 1
if not bad: return steps
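# --- Added note (not part of the original script) -------------------------------
# The sweep above is successive over-relaxation for Laplace's equation:
#   V[i,j] <- V[i,j] + alpha * (V*_ij - V[i,j]),   where
#   V*_ij  = (V[i+1,j] + V[i-1,j] + V[i,j+1] + V[i,j-1]) / 4
# alpha = 1 is plain Gauss-Seidel; 1 < alpha < 2 accelerates convergence.
# Iteration stops once |laplacian(V)| < eps at every non-boundary node.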
N = 100
L = 1.0
dx = L / (N - 1)
eps = 1e-5
alpha = 1.7
electrodes = [
Electrode(0.2, 0.2, 0.1, +1),
Electrode(0.4, 0.2, 0.1, -1),
Electrode(0.7, 0.3, 0.1, +1),
Electrode(0.5, 0.8, 0.1, +1),
]
V, is_boundary = setup(electrodes, dx, N)
steps = SOR(V, is_boundary, eps, alpha, dx)
print(steps, "steps needed")
x = y = np.linspace(0, L, N)
plt.pcolormesh(x, y, V)
plt.xlabel("x")
plt.ylabel("y")
plt.title("V")
plt.colorbar()
plt.show()
| mit |
MPIBGC-TEE/CompartmentalSystems | tests/Test_BlockIvp.py | 1 | 2477 | import unittest
from testinfrastructure.InDirTest import InDirTest
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from CompartmentalSystems.BlockIvp import BlockIvp
class TestBlockIvp(InDirTest):
@unittest.skip
def test_solution(self):
x1_shape=(5,5)
x2_shape=(2,)
bivp=BlockIvp(
time_str='t'
,start_blocks=[('x1',np.ones(x1_shape)),('x2',np.ones(x2_shape))]
,functions=[
((lambda x1 : - x1 ), [ 'x1' ])
,((lambda t,x2 : - 2*t*x2 ), ['t' , 'x2'])
])
# the reference solution
t_max=2
ref={'x1':np.exp(-t_max)*np.ones(x1_shape),
'x2':np.exp(-t_max**2)*np.ones(x2_shape)
}
res = bivp.block_solve(t_span=(0,t_max))
self.assertTrue(np.allclose(res['x1'][-1],ref['x1'],rtol=1e-2))
self.assertTrue(np.allclose(res['x2'][-1],ref['x2'],rtol=1e-2))
# here we describe time by a variable with constant derivative
# amd use it in the derivative of the second variable
# to simulate a coupled system without feedback (skew product)
x1_shape=(1,)
x2_shape=(2,)
bivp=BlockIvp(
time_str='t'
,start_blocks=[('x1',np.zeros(x1_shape)),('x2',np.ones(x2_shape))]
,functions=[
((lambda x1 : np.ones(x1.shape) ), ['x1'])
,((lambda x1,x2 : - 2*x1*x2 ), ['x1' , 'x2'])
])
# the reference solution
t_max=2
ref={'x1':t_max*np.ones(x1_shape),
'x2':np.exp(-t_max**2)*np.ones(x2_shape)
}
res = bivp.block_solve(t_span=(0,t_max))
self.assertTrue(np.allclose(res['x1'][-1],ref['x1'],rtol=1e-2))
self.assertTrue(np.allclose(res['x2'][-1],ref['x2'],rtol=1e-2))
################################################################################
if __name__ == '__main__':
suite=unittest.defaultTestLoader.discover(".",pattern=__file__)
# # Run same tests across 16 processes
# concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(1))
# runner = unittest.TextTestRunner()
# res=runner.run(concurrent_suite)
# # to let the buildbot fail we set the exit value !=0 if either a failure or error occurs
# if (len(res.errors)+len(res.failures))>0:
# sys.exit(1)
unittest.main()
| mit |
bmanubay/open-forcefield-tools | single-molecule-property-generation/manipulateparameters.py | 1 | 59104 | # imports needed
import matplotlib as mpl
mpl.use('Agg')
from smarty.forcefield import *
import openeye
from openeye import oechem
import smarty
from smarty.utils import get_data_filename
from simtk import openmm
from simtk import unit
import numpy as np
import netCDF4 as netcdf
import collections as cl
import pandas as pd
import pymbar
from pymbar import timeseries
import glob
import sys
import cmath
from smarty.forcefield import generateTopologyFromOEMol
import pdb
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=np.inf)
#----------------------------------------------------------------------
# CONSTANTS
#----------------------------------------------------------------------
kB = 0.001987204118 #Boltzmann constant (Gas constant) in kcal/(mol*K)
#----------------------------------------------------------------------
# UTILITY FUNCTIONS
#----------------------------------------------------------------------
def constructDataFrame(mol_files):
"""
    Construct a pandas dataframe to be populated with computed single molecule properties. Each unique bond, angle and torsion has its own column for a value
and uncertainty.
Parameters
-----------
mol_files - a list of mol2 files from which we determine connectivity using OpenEye Tools and construct the dataframe using Pandas.
Returns
-----------
df - data frame in form molecules x property id that indicates if a specific property exists for a molecule (1 in cell if yes, 0 if no)
"""
molnames = []
for i in mol_files:
molname = i.replace(' ', '')[:-5]
molname = molname.rsplit('/' ,1)[1]
molnames.append(molname)
OEMols=[]
for i in mol_files:
mol = oechem.OEGraphMol()
ifs = oechem.oemolistream(i)
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor(oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol)
oechem.OETriposAtomNames(mol)
OEMols.append(mol)
ff = ForceField(get_data_filename('/data/forcefield/smirff99Frosst.ffxml'))
labels = []
lst0 = []
lst1 = []
lst2 = []
lst00 = [[] for i in molnames]
lst11 = [[] for i in molnames]
lst22 = [[] for i in molnames]
lst_0 = [[] for i in molnames]
lst_1 = [[] for i in molnames]
lst_2 = [[] for i in molnames]
for ind, val in enumerate(OEMols):
label = ff.labelMolecules([val], verbose = False)
for entry in range(len(label)):
for bond in label[entry]['HarmonicBondGenerator']:
lst0.extend([str(bond[0])])
lst00[ind].extend([str(bond[0])])
lst_0[ind].append([str(bond[0]),str(bond[2])])
for angle in label[entry]['HarmonicAngleGenerator']:
lst1.extend([str(angle[0])])
lst11[ind].extend([str(angle[0])])
lst_1[ind].append((str(angle[0]),str(angle[2])))
for torsion in label[entry]['PeriodicTorsionGenerator']:
lst2.extend([str(torsion[0])])
lst22[ind].extend([str(torsion[0])])
lst_2[ind].append([str(torsion[0]),str(torsion[2])])
# Return unique strings from lst0
cols0 = set()
for x in lst0:
cols0.add(x)
cols0 = list(cols0)
# Generate data lists to populate dataframe
data0 = [[] for i in range(len(lst00))]
for val in cols0:
for ind,item in enumerate(lst00):
if val in item:
data0[ind].append(1)
else:
data0[ind].append(0)
# Return unique strings from lst1
cols1 = set()
for x in lst1:
cols1.add(x)
cols1 = list(cols1)
# Generate data lists to populate frame (1 means val in lst11 was in cols1, 0 means it wasn't)
data1 = [[] for i in range(len(lst11))]
for val in cols1:
for ind,item in enumerate(lst11):
if val in item:
data1[ind].append(1)
else:
data1[ind].append(0)
# Return unique strings from lst2
cols2 = set()
for x in lst2:
cols2.add(x)
cols2 = list(cols2)
# Generate data lists to populate frame (1 means val in lst22 was in cols2, 0 means it wasn't)
data2 = [[] for i in range(len(lst22))]
for val in cols2:
for ind,item in enumerate(lst22):
if val in item:
data2[ind].append(1)
else:
data2[ind].append(0)
# Clean up clarity of column headers and molecule names
cols0t = ["BondEquilibriumLength " + i for i in cols0]
cols0temp = ["BondEquilibriumLength_std " + i for i in cols0]
cols0 = cols0t + cols0temp
cols1t = ["AngleEquilibriumAngle " + i for i in cols1]
cols1temp = ["AngleEquilibriumAngle_std " + i for i in cols1]
cols1 = cols1t + cols1temp
cols2t = ["TorsionFourier1 " + i for i in cols2]
cols2temp = ["TorsionFourier1_std " + i for i in cols2]
cols2 = cols2t + cols2temp
data0 = [i+i for i in data0]
data1 = [i+i for i in data1]
data2 = [i+i for i in data2]
# Construct dataframes
df0 = pd.DataFrame(data = data0, index = molnames, columns = cols0)
df0['molecule'] = df0.index
df1 = pd.DataFrame(data = data1, index = molnames, columns = cols1)
df1['molecule'] = df1.index
df2 = pd.DataFrame(data = data2, index = molnames, columns = cols2)
df2['molecule'] = df2.index
dftemp = pd.merge(df0, df1, how = 'outer', on = 'molecule')
df = pd.merge(dftemp, df2, how = 'outer', on = 'molecule')
return df, lst_0, lst_1, lst_2
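# --- Added usage sketch (not part of the original script) -----------------------
# Building the property dataframe from a directory of mol2 files; the glob
# pattern below is a placeholder.
def _demo_construct_dataframe():
    mol_files = glob.glob('Mol2_files/*.mol2')   # hypothetical location
    df, bond_lists, angle_lists, torsion_lists = constructDataFrame(mol_files)
    return df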
#------------------------------------------------------------------
def ComputeBondsAnglesTorsions(xyz, bonds, angles, torsions):
"""
compute a 3 2D arrays of bond lengths for each frame: bond lengths in rows, angle lengths in columns
Parameters
-----------
xyz - xyz files, an array of length-2 arrays
bonds, angles, torsions - numbered atom indices tuples associated with all unqiue bonds, angles and torsions
Returns
----------
bond_dist, angle_dist, torsion_dist - computed bonds, angles and torsions across the provided time series
"""
niterations = xyz.shape[0] # no. of frames
natoms = xyz.shape[1]
nbonds = np.shape(bonds)[0]
nangles = np.shape(angles)[0]
ntorsions = np.shape(torsions)[0]
bond_dist = np.zeros([niterations,nbonds])
angle_dist = np.zeros([niterations,nangles])
torsion_dist = np.zeros([niterations,ntorsions])
for n in range(niterations):
xyzn = xyz[n] # coordinates this iteration
bond_vectors = np.zeros([nbonds,3])
for i, bond in enumerate(bonds):
bond_vectors[i,:] = xyzn[bond[0]] - xyzn[bond[1]] # calculate the length of the vector
bond_dist[n,i] = np.linalg.norm(bond_vectors[i]) # calculate the bond distance
# we COULD reuse the bond vectors and avoid subtractions, but would involve a lot of bookkeeping
# for now, just recalculate
bond_vector1 = np.zeros(3)
bond_vector2 = np.zeros(3)
bond_vector3 = np.zeros(3)
for i, angle in enumerate(angles):
bond_vector1 = xyzn[angle[0]] - xyzn[angle[1]] # calculate the length of the vector
bond_vector2 = xyzn[angle[1]] - xyzn[angle[2]] # calculate the length of the vector
dot = np.dot(bond_vector1,bond_vector2)
len1 = np.linalg.norm(bond_vector1)
len2 = np.linalg.norm(bond_vector2)
angle_dist[n,i] = np.arccos(dot/(len1*len2)) # angle in radians
for i, torsion in enumerate(torsions):
# algebra from http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates, Daniel's answer
bond_vector1 = xyzn[torsion[0]] - xyzn[torsion[1]] # calculate the length of the vector
bond_vector2 = xyzn[torsion[1]] - xyzn[torsion[2]] # calculate the length of the vector
bond_vector3 = xyzn[torsion[2]] - xyzn[torsion[3]] # calculate the length of the vector
bond_vector1 /= np.linalg.norm(bond_vector1)
bond_vector2 /= np.linalg.norm(bond_vector2)
bond_vector3 /= np.linalg.norm(bond_vector3)
n1 = np.cross(bond_vector1,bond_vector2)
n2 = np.cross(bond_vector2,bond_vector3)
m = np.cross(n1,bond_vector2)
x = np.dot(n1,n2)
y = np.dot(m,n2)
torsion_dist[n,i] = np.arctan2(y,x) # angle in radians
return bond_dist, angle_dist, torsion_dist
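# --- Added usage sketch (not part of the original script) -----------------------
# A single-frame, four-atom chain gives exactly one bond, one angle and one
# torsion to check. The coordinates and atom indices below are illustrative only.
def _demo_compute_bonds_angles_torsions():
    xyz = np.array([[[0., 0., 0.],
                     [1., 0., 0.],
                     [1., 1., 0.],
                     [1., 1., 1.]]])             # shape (1 frame, 4 atoms, 3)
    bonds = np.array([[0, 1]])
    angles = np.array([[0, 1, 2]])
    torsions = np.array([[0, 1, 2, 3]])
    b, a, t = ComputeBondsAnglesTorsions(xyz, bonds, angles, torsions)
    # expected: bond length 1.0, angle pi/2, dihedral +/- pi/2 for this geometry
    return b, a, t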
#------------------------------------------------------------------
def calculateBondsAnglesTorsionsStatistics(properties, bond_dist, angle_dist, torsion_dist, bonds, angles, torsions, torsionbool):
"""
Parameters
-----------
properties: A list of property strings we want value for
bond_dist: a Niterations x nbonds list of bond lengths
    angle_dist: a Niterations x nangles list of bond angles (in radians)
    torsion_dist: a Niterations x ntorsions list of dihedral angles (in radians)
    bonds: a list of bonds (nbonds x 2)
    angles: a list of angles (nangles x 3)
    torsions: a list of torsion atoms (ntorsions x 4)
    torsionbool: boolean which suppresses torsion statistical analysis if False
    # we assume the bond_dist / bonds, angle_dist / angles, torsion_dist / torsions were constructed in the same order.
    Returns
    ----------
    PropertyDict - dictionary of average value of bond, angle or torsion across time series with associated uncertainty in mean and uncertainty in uncertainty
"""
PropertyDict = dict()
nbonds = np.shape(bonds)[0]
nangles = np.shape(angles)[0]
ntorsions = np.shape(torsions)[0]
nsamp = np.shape(bond_dist)[0]-1 #WARNING: assumes data points uncorrelated!
for p in properties:
AtomList = p.split(' ', 1)[1:] # figure out which bond this is:
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList] # we assume bond_dist /bond is in the same order.
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if 'BondEquilibriumLength' in p:
for i in range(nbonds):
if np.array_equal(AtomList, bonds[i]):
value = np.mean(bond_dist[:,i])
uncertainty = np.std(bond_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if 'BondEquilibriumLength_std' in p:
for i in range(nbonds):
if np.array_equal(AtomList, bonds[i]):
value = np.std(bond_dist[:,i])
uncertainty = np.std(bond_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
if 'AngleEquilibriumAngle' in p:
for i in range(nangles):
if np.array_equal(AtomList, angles[i]):
value = np.mean(angle_dist[:,i])
uncertainty = np.std(angle_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if torsionbool==True:
if 'TorsionFourier1' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.mean(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value,uncertainty]
if 'TorsionFourier1_std' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.std(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
# Circular distribution alternate for torsion calculation
if 'TorsionFourier1' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.array([])
for j in range(nsamp):
val = np.real((np.exp(cmath.sqrt(-1)*torsion_dist[:,i]))**j)
value = np.append(value, val)
value = (1/nsamp)*np.sum(value)
uncertainty = np.std(torsion_dist[:,i])/np.sqrt(nsamp)
PropertyDict[p] = [value, uncertainty]
if 'TorsionFourier1_std' in p:
for i in range(ntorsions):
if np.array_equal(AtomList, torsions[i]):
value = np.std(torsion_dist[:,i])
uncertainty = np.std(torsion_dist[:,i])**2/np.sqrt(nsamp/2)
PropertyDict[p] = [value,uncertainty]
else:
pass
return PropertyDict
#------------------------------------------------------------------
def get_properties_from_trajectory(mol2, ncfiles, torsionbool=True):
"""
take multiple .nc files with identifier names and a pandas dataframe with property
names for single atom bonded properties (including the atom numbers) and populate
    that property dataframe.
Parameters
-----------
mol2 - mol2 files used to identify and index molecules
ncfiles - a list of trajectories in netcdf format. Names should correspond to the identifiers in the pandas dataframe.
    torsionbool - boolean value passed to calculateBondsAnglesTorsionsStatistics() to suppress torsion statistics analysis. Default set to True (torsion calculation not suppressed).
Returns
----------
bond_dist - calculated bond distribution across trajectory
angle_dist - calculated angle distribution across trajectory
torsion_dist - calculated torsion distribution across trajectory
Properties - dictionary of an average value of bond, angle or torsion across time series with associated uncertainty in mean and uncertainty in uncertainty
"""
PropertiesPerMolecule = dict()
# here's code that generate list of properties to calculate for each molecule and
# populate PropertiesPerMolecule
mol_files = mol2
df = constructDataFrame(mol_files)
MoleculeNames = df.molecule.tolist()
properties = df.columns.values.tolist()
for ind, val in enumerate(MoleculeNames):
defined_properties = list()
for p in properties:
if (p is not 'molecule') and ('_std' not in p):
if df.iloc[ind][p] != 0:
defined_properties.append(p)
PropertiesPerMolecule[val] = defined_properties
AtomDict = dict()
AtomDict['MolName'] = list()
for fname in ncfiles:
MoleculeName = fname.split('.')[0]
AtomDict['MolName'].append(MoleculeName)
# extract the xyz coordinate for each frame
data = netcdf.Dataset(fname)
xyz = data.variables['coordinates']
# what is the property list for this molecule
PropertyNames = PropertiesPerMolecule[MoleculeName]
# extract the bond/angle/torsion lists
AtomDict['Bond'] = list()
AtomDict['Angle'] = list()
AtomDict['Torsion'] = list()
# which properties will we use to construct the bond list
ReferenceProperties = ['BondEquilibriumLength','AngleEquilibriumAngle','TorsionFourier1']
for p in PropertyNames:
PropertyName = p.split(' ', 1)[0]
AtomList = p.split(' ', 1)[1:]
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList]
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if any(rp in p for rp in ReferenceProperties):
if 'Bond' in p:
AtomDict['Bond'].append(AtomList)
if 'Angle' in p:
AtomDict['Angle'].append(AtomList)
if 'Torsion' in p:
AtomDict['Torsion'].append(AtomList)
        bond_dist, angle_dist, torsion_dist = ComputeBondsAnglesTorsions(xyz,
AtomDict['Bond'],
AtomDict['Angle'],
AtomDict['Torsion'])
Properties = calculateBondsAnglesTorsionsStatistics(PropertyNames,
bond_dist, angle_dist, torsion_dist,
AtomDict['Bond'], AtomDict['Angle'], AtomDict['Torsion'], torsionbool)
#Put properties back in dataframe and return
return [bond_dist, angle_dist, torsion_dist, Properties]
#------------------------------------------------------------------
def read_col(filename,colname,frames):
"""
Reads in columns from .csv outputs of OpenMM StateDataReporter
Parameters
-----------
filename (string) - the path to the folder of the csv
colname (string) - the column you wish to extract from the csv
frames (integer) - the number of frames you wish to extract
Returns
----------
dat - the pandas column series written as a matrix
"""
#print "--Reading %s from %s/..." % (colname,filename)
# Read in file output as pandas df
df = pd.read_csv(filename, sep= ',')
# Read values direct from column into numpy array
dat = df.as_matrix(columns = colname)
dat = dat[-frames:]
return dat
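# Usage sketch (path and column name are illustrative; mirrors the commented-out
# post-processing call further below):
# >>> temps = read_col('StateData/data.csv', ["Temperature (K)"], 100)
# >>> temps.shape # (100, 1); the last 100 rows of that column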
#------------------------------------------------------------------
def readtraj(ncfiles,indkeep):
"""
Read coordinates from a netcdf trajectory in order to re-evaluate energies based on parameter changes
Parameters
-----------
ncfiles - path to a trajectory in netcdf format
indkeep - number of frames, counted from the end of the trajectory, to keep
Returns
----------
data - all of the data contained in the netcdf file
xyzn - the coordinates from the netcdf in angstroms
"""
data = netcdf.Dataset(ncfiles)
xyz = data.variables['coordinates']
xyzn = unit.Quantity(xyz[-indkeep:], unit.angstroms)
return data, xyzn
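# Usage sketch (trajectory name is illustrative; note the function takes a
# single .nc path despite the plural argument name):
# >>> data, xyzn = readtraj('traj4ns/AlkEthOH_r0.nc', 4000)
# >>> xyzn[0] # first kept frame, a (natoms, 3) Quantity in angstroms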
#------------------------------------------------------------------
def get_energy(system, positions):
"""
Return the potential energy.
Parameters
----------
system : simtk.openmm.System
The system to check
positions : simtk.unit.Quantity of dimension (natoms,3) with units of length
The positions to use
Returns
---------
energy
"""
integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
context = openmm.Context(system, integrator)
context.setPositions(positions)
state = context.getState(getEnergy=True)
energy = state.getPotentialEnergy() / unit.kilocalories_per_mole
return energy
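# Usage sketch (names are illustrative; any OpenMM System plus a positions
# Quantity with length units will do):
# >>> system = ff.createSystem(topology, [mol])
# >>> e = get_energy(system, xyzn[0]) # float, in kcal/mol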
#------------------------------------------------------------------
def new_param_energy(mol2, traj, smirkss, N_k, params, paramtype, samps, indkeep, *coords):
"""
Return potential energies associated with specified parameter perturbations.
Parameters
----------
mol2: mol2 file associated with molecule of interest used to construct OEMol object
traj: trajectory from the simulation ran on the given molecule
smirkss: list of smirks strings we wish to apply parameter changes to (Only changing 1 type of string at a time now. All bonds, all angles or all torsions)
N_k: numpy array of number of samples per state
params: a numpy array of the parameter values we wish to test
paramtype: the type of ff param being edited
BONDS - k (bond force constant), length (equilibrium bond length)
ANGLES - k (angle force constant), angle (equilibrium bond angle)
TORSIONS - k{i} (torsion force constant), idivf{i} (torsional barrier multiplier), periodicity{i} (periodicity of the torsional barrier), phase{i}
(phase offset of the torsion)
NONBONDED - epsilon and rmin_half (where epsilon is the LJ parameter epsilon and rmin_half is half of the LJ parameter rmin)
samps: samples per energy calculation
Returns
-------
energies: a list of the energies associated with the forcefield parameters used as input
"""
#-------------------
# PARAMETERS
#-------------------
params = params
N_k = N_k
ncfiles = traj
# Determine number of simulations
K = np.size(N_k)
#if np.shape(params) != np.shape(N_k): raise "K_k and N_k must have same dimensions"
# Determine max number of samples to be drawn from any state
#-------------
# SYSTEM SETUP
#-------------
verbose = False # suppress echoes from OEtoolkit functions
ifs = oechem.oemolistream(mol2)
mol = oechem.OEMol()
# This uses parm@frosst atom types, so make sure to use the forcefield-flavor reader
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol )
# Perceive tripos types
oechem.OETriposAtomNames(mol)
# Get positions for use below
if not coords:
data, xyz = readtraj(traj,indkeep)
#indkeep = int(lentraj*perckeep)
xyzn = xyz[-indkeep:]
else:
xyzn = coords
# Load forcefield file
ffxml = get_data_filename('forcefield/smirff99Frosst.ffxml')
ff = ForceField(ffxml)
# Generate a topology
from smarty.forcefield import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
#-----------------
# MAIN
#-----------------
# Calculate energies
energies = np.zeros([len(smirkss),len(params),samps],np.float64)
for inds,s in enumerate(smirkss):
temp0 = np.zeros([len(params),samps],np.float64)
param = ff.getParameter(smirks=s)
for ind,val in enumerate(params):
for p in paramtype:
temp1 = np.zeros(samps,np.float64)
for a,b in zip(val,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
for i,a in enumerate(xyzn):
e = np.float(get_energy(system, a))
energies[inds,ind,i] = e
return energies, xyzn, system
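# Usage sketch mirroring the call in the main loop below (values illustrative):
# >>> E, xyz, sys_ = new_param_energy('Mol2_files/AlkEthOH_r0.mol2',
# ... 'traj4ns/AlkEthOH_r0.nc', smirkss, N_k, np.array([[1.090]]),
# ... paramtype, N_max, indkeep)
# E has shape (len(smirkss), len(params), samps) and holds energies in kcal/mol.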
#------------------------------------------------------------------
def get_small_mol_dict(mol2, traj):
"""
Return dictionary specifying the bond, angle and torsion indices to feed to ComputeBondsAnglesTorsions()
Parameters
----------
mol2: mol2 file associated with molecule of interest used to determine atom labels
traj: trajectory from the simulation ran on the given molecule
Returns
-------
AtomDict: a dictionary of the bond, angle and torsion indices for the given molecule
"""
PropertiesPerMolecule = dict()
mol_files = []
for i in mol2:
temp = i
mol_files.append(temp)
df,lst_0,lst_1,lst_2 = constructDataFrame(mol_files)
MoleculeNames = df.molecule.tolist()
properties = df.columns.values.tolist()
#print MoleculeNames
for ind, val in enumerate(MoleculeNames):
defined_properties = list()
for p in properties:
if (p != 'molecule') and ('_std' not in p):
if df.iloc[ind][p] != 0:
defined_properties.append(p)
PropertiesPerMolecule[val] = defined_properties
AtomDict = dict()
AtomDict['MolName'] = list()
for fname in traj:
MoleculeName = fname.split('.')[0][8:]
AtomDict['MolName'].append(MoleculeName)
# what is the property list for this molecule
PropertyNames = PropertiesPerMolecule[MoleculeName]
# extract the bond/angle/torsion lists
AtomDict['Bond'] = list()
AtomDict['Angle'] = list()
AtomDict['Torsion'] = list()
# which properties will we use to construct the bond list
ReferenceProperties = ['BondEquilibriumLength','AngleEquilibriumAngle','TorsionFourier1']
for p in PropertyNames:
PropertyName = p.split(' ', 1)[0]
AtomList = p.split(' ', 1)[1:]
AtomList = [i.lstrip('[').rstrip(']') for i in AtomList]
for i in AtomList:
AtomList = i.strip().split(',')
AtomList = map(int, AtomList)
if any(rp in p for rp in ReferenceProperties):
if 'Bond' in p:
AtomDict['Bond'].append(AtomList)
if 'Angle' in p:
AtomDict['Angle'].append(AtomList)
if 'Torsion' in p:
AtomDict['Torsion'].append(AtomList)
return AtomDict,lst_0,lst_1,lst_2
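# Usage sketch (file names illustrative): AtomDict maps 'Bond', 'Angle' and
# 'Torsion' to lists of atom-index lists for the molecule in the trajectory.
# >>> AtomDict, lst_0, lst_1, lst_2 = get_small_mol_dict(
# ... ['Mol2_files/AlkEthOH_r0.mol2'], ['traj4ns/AlkEthOH_r0.nc'])
# >>> AtomDict['Torsion'][0] # e.g. [1, 2, 3, 4]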
#------------------------------------------------------------------
def subsampletimeseries(timeser,xyzn,N_k):
"""
Return a subsampled timeseries based on statistical inefficiency calculations.
Parameters
----------
timeser: the timeseries to be subsampled
xyzn: the coordinates associated with each frame of the timeseries to be subsampled
N_k: original # of samples in each timeseries
Returns
---------
N_k_sub: new number of samples per timeseries
ts_sub: the subsampled timeseries
xyz_sub: the subsampled configuration series
"""
# Make a copy of the timeseries and make sure it is a numpy array of floats
ts = timeser
xyz = xyzn
# initialize array of statistical inefficiencies
g = np.zeros(len(ts),np.float64)
for i,t in enumerate(ts):
if np.count_nonzero(t)==0:
g[i] = np.float(1.)
print "WARNING FLAG"
else:
g[i] = timeseries.statisticalInefficiency(t)
N_k_sub = np.array([len(timeseries.subsampleCorrelatedData(t,g=b)) for t, b in zip(ts,g)])
ind = [timeseries.subsampleCorrelatedData(t,g=b) for t,b in zip(ts,g)]
#xyz_sub = np.array([unit.Quantity(c[i], unit.angstroms) for c,i in zip(xyz,ind)])
if (N_k_sub == N_k).all():
ts_sub = ts
xyz_sub = xyz
print "No sub-sampling occurred"
else:
print "Sub-sampling..."
ts_sub = np.array([t[timeseries.subsampleCorrelatedData(t,g=b)] for t,b in zip(ts,g)])
for c in xyz:
xyz_sub = [c[timeseries.subsampleCorrelatedData(t,g=b)] for t,b in zip(ts,g)]
return ts_sub, N_k_sub, xyz_sub, ind
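# Usage sketch: A is a (K, N) array of per-frame observables and xyznsampled a
# list of K coordinate arrays (see the real call in the main loop below):
# >>> A_sub, N_kA, xyz_A_sub, indA = subsampletimeseries(A, xyznsampled, N_k)
# N_kA gives the number of statistically independent samples in each state.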
#------------------------------------------------------------------
def reject_outliers(data, m=2):
return data[abs(data - np.mean(data)) < m * np.std(data)]
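# For example, with the default m=2 the clear outlier below is dropped (a
# sketch; the helper is not called in the rest of this script):
# >>> reject_outliers(np.array([1., 1.05, 0.95, 1.02, 0.98, 1.01, 0.99, 5.]))
# returns the first seven values; 5.0 is more than two standard deviations out.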
#------------------------------------------------------------------
# MAIN
#-----------------------------------------------------------------
# PARAMETERS
#-----------------------------------------------------------------
#N_k = np.array([100, 100, 100, 100, 100])
#N_k = np.array([100,100])
#N_k_orig = 10000.
#pctkeep = 0.8
indkeep = 4000
N_k= np.array([4000])
K = np.size(N_k)
N_max = np.max(N_k)
K_extra_vals = np.arange(0.08,0.25,0.01)
#K_k = np.array([[106], [104], [102], [100], [98]])
#K_k = np.array([[104.],[100.]])
#K_k = np.array([[680.]])
#K_k = np.array([[680.]])
K_k = np.array([[[1.090]] for val in K_extra_vals])
#K_extra = np.array([[96], [99], [103], [105], [108]]) # unsampled force constants
#K_extra = np.array([[110.],[98.]])
#K_extra = np.array([[600.]])
K_extra = np.array([[[val]] for val in K_extra_vals])
paramtype = [['k1']]
obstype = 'Torsion'
#mol2 = [['molecules/AlkEthOH_r0.mol2'],['molecules/AlkEthOH_r48.mol2'],['molecules/AlkEthOH_r51.mol2'],['molecules/AlkEthOH_c581.mol2'],['molecules/AlkEthOH_c100.mol2'],['molecules/AlkEthOH_c1161.mol2'],['molecules/AlkEthOH_c1266.mol2'],['molecules/AlkEthOH_c38.mol2'],['molecules/AlkEthOH_r118.mol2'],['molecules/AlkEthOH_r12.mol2']]
#mol2 = [['molecules/AlkEthOH_r0.mol2'],['molecules/AlkEthOH_c581.mol2'],['molecules/AlkEthOH_c100.mol2'],['molecules/AlkEthOH_c1266.mol2'],['molecules/AlkEthOH_r51.mol2'],['molecules/AlkEthOH_r48.mol2']]
mol2 = [['Mol2_files/'+sys.argv[1]+'.mol2']]
#mol2en = ['molecules/AlkEthOH_r0.mol2','molecules/AlkEthOH_r48.mol2','molecules/AlkEthOH_r51.mol2','molecules/AlkEthOH_c581.mol2','molecules/AlkEthOH_c100.mol2','molecules/AlkEthOH_c1161.mol2','molecules/AlkEthOH_c1266.mol2','molecules/AlkEthOH_c38.mol2','molecules/AlkEthOH_r118.mol2','molecules/AlkEthOH_r12.mol2']
mol2en = [val[0] for val in mol2]
#traj = ['traj/AlkEthOH_r0.nc','traj/AlkEthOH_r48.nc','traj/AlkEthOH_r51.nc','traj/AlkEthOH_c581.nc','traj/AlkEthOH_c100.nc','traj/AlkEthOH_c1161.nc','traj/AlkEthOH_c1266.nc','traj/AlkEthOH_c38.nc','traj/AlkEthOH_r118.nc','traj/AlkEthOH_r12.nc']
#traj = ['traj/AlkEthOH_r0.nc','traj/AlkEthOH_c581.nc','traj/AlkEthOH_c100.nc','traj/AlkEthOH_c1266.nc','traj/AlkEthOH_r51.nc','traj/AlkEthOH_r48.nc']
traj = ['traj4ns/'+sys.argv[1]+'.nc']
#trajs = [['traj/AlkEthOH_r0.nc'],['traj/AlkEthOH_r48.nc'],['traj/AlkEthOH_r51.nc'],['traj/AlkEthOH_c581.nc'],['traj/AlkEthOH_c100.nc'],['traj/AlkEthOH_c1161.nc'],['traj/AlkEthOH_c1266.nc'],['traj/AlkEthOH_c38.nc'],['traj/AlkEthOH_r118.nc'],['traj/AlkEthOH_r12.nc']]
#trajs = [['traj/AlkEthOH_r0.nc'],['traj/AlkEthOH_c581.nc']]
trajs = [[val] for val in traj]
smirkss = ['[#6X4:1]-[#6X4:2]-[#8X2H1:3]-[#1:4]']
trajstest = [[[] for i in K_extra] for _ in traj]
for ind,val in enumerate(trajs):
for ind1,val1 in enumerate(K_extra):
trajstest[ind][ind1] = [val[0][:-3]+'_'+smirkss[0]+'_k1'+str(val1[0][0])+'.nc']
# Calculate energies at various parameters of interest
#energies, xyzn, system = new_param_energy(mol2en,traj, smirkss, N_k, K_k, paramtype, N_max)
#energiesnew, xyznnew, systemnew = new_param_energy(mol2en, traj, smirkss, N_k, K_extra, paramtype, N_max)
# Create lists to store data that will eventually be written in pandas df and saved as csv/json/pkl
molnamedf = []
smirksdf = []
obstypedf = []
paramtypedf = []
newparamval = []
N_subsampleddf = []
percentshiftdf = []
E_expectdf = []
dE_expectdf = []
dE_bootdf = []
E_stddevawaydf = []
Enew_expectdf = []
dEnew_expectdf = []
dEnew_bootdf = []
Enew_stddevawaydf = []
A_expectdf = []
dA_expectdf = []
dA_bootdf = []
A_stddevawaydf = []
Anew_sampleddf = []
Anew_expectdf = []
dAnew_expectdf = []
dAnew_bootdf = []
Anew_stddevawaydf = []
varAnew_bootdf = []
varAnew_sampdf = []
altvarAnew_bootdf = []
dvarAnew_bootdf = []
altdvarAnew_bootdf = []
varAnew_bootdf2 = []
altvarAnew_bootdf2 = []
dvarAnew_bootdf2 = []
altdvarAnew_bootdf2 = []
A_boot_new_sampdf = []
dA_boot_new_sampdf = []
# Return AtomDict needed to feed to ComputeBondsAnglesTorsions()
for ind,(i,j) in enumerate(zip(mol2,traj)):
AtomDict,lst_0,lst_1,lst_2 = get_small_mol_dict(i, [j])
mylist = [ii[1] for ii in lst_2[0]]
myset = set(mylist)
poplist = np.zeros([len(myset)],np.float64)
for b,k in enumerate(myset):
print "%s occurs %s times" %(k, mylist.count(k))
poplist[b] = mylist.count(k)
pctlist = 100.*poplist/sum(poplist)
pctdict = dict()
for c,k in enumerate(myset):
pctdict[k] = pctlist[c]
print '#################################################################################'
Atomdictmatches = []
for sublist in lst_2[0]:
if sublist[1] == smirkss[0]:
Atomdictmatches.append(sublist[0])
if not Atomdictmatches:
print 'No matches found'
continue
Atomdictmatchinds = []
for yy in Atomdictmatches:
for z,y in enumerate(AtomDict[obstype]):
if yy == str(AtomDict[obstype][z]):
Atomdictmatchinds.append(z)
obs_ind = Atomdictmatchinds[0]
# Calculate energies at various parameters of interest
for indparam,valparam in enumerate(K_extra):
energies, xyzn, system = new_param_energy(mol2en[ind],j, smirkss, N_k, K_k[indparam], paramtype, N_max, indkeep)
energiesnew, xyznnew, systemnew = new_param_energy(mol2en[ind],j, smirkss, N_k, K_extra[indparam], paramtype, N_max, indkeep)
xyznsampled = [[] for i in trajs[ind]]
A = np.zeros([K,N_max],np.float64)
for i,x in enumerate(trajs[ind]):
coord = readtraj(x,indkeep)[1]
xyznsampled[i] = coord
obs = ComputeBondsAnglesTorsions(coord,AtomDict['Bond'],AtomDict['Angle'],AtomDict['Torsion'])[0]# Compute angles and return array of angles
numatom = len(obs[0]) # get number of unique angles in molecule
timeser = [obs[:,d] for d in range(numatom)] # re-organize data into timeseries
A[i] = timeser[obs_ind] # pull out single angle in molecule for test case
xyznnewtest = [[] for i in trajstest[ind][indparam]]
Anewtest = np.zeros([K,N_max],np.float64)
for i,x in enumerate(trajstest[ind][indparam]):
coordtest = readtraj(x,indkeep)[1]
xyznnewtest[i] = coordtest
obstest = ComputeBondsAnglesTorsions(coordtest,AtomDict['Bond'],AtomDict['Angle'],AtomDict['Torsion'])[0]# Compute angles and return array of angles
numatomtest = len(obstest[0]) # get number of unique angles in molecule
timesertest = [obstest[:,d] for d in range(numatomtest)] # re-organize data into timeseries
Anewtest[i] = timesertest[obs_ind] # pull out single angle in molecule for test case
# Subsample timeseries and return new number of samples per state
A_sub, N_kA, xyzn_A_sub, indA = subsampletimeseries(A, xyznsampled, N_k)
En_sub, N_kEn, xyzn_En_sub, indEn = subsampletimeseries(energies[0], xyznsampled, N_k)
Ennew_sub, N_kEnnew, xyzn_Ennew_sub, indEnnew = subsampletimeseries(energiesnew[0], xyznsampled, N_k)
A_sub_test,N_kA_test,xyzn_A_test,indAtest = subsampletimeseries(Anewtest,xyznnewtest,N_k)
for a,b,c,d in zip(N_kA,N_kEn,N_kEnnew,N_kA_test):
N_kF = np.array([min(a,b,c,d)])
A_kn = np.zeros([sum(N_kF)],np.float64)
A_knnew = np.zeros([sum(N_kF)],np.float64)
count = 0
for x1,x2 in zip(A_sub,A_sub_test):
for y1,y2 in zip(x1,x2):
A_kn[count] = y1
A_knnew[count] = y2
count += 1
if count > (sum(N_kF)-1):
break
#--------------------------------------------------------------
# Re-evaluate potentials at all subsampled coordinates and parameters
#--------------------------------------------------------------
verbose = False # suppress echoes from OEtoolkit functions
ifs = oechem.oemolistream(mol2en[ind])
mol = oechem.OEMol()
# This uses parm@frosst atom types, so make sure to use the forcefield-flavor reader
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)
oechem.OEReadMolecule(ifs, mol )
# Perceive tripos types
oechem.OETriposAtomNames(mol)
# Load forcefield file
ffxml = get_data_filename('forcefield/smirff99Frosst.ffxml')
ff = ForceField(ffxml)
# Generate a topology
from smarty.forcefield import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
# Re-calculate energies
E_kn = np.zeros([len(K_k[indparam]),sum(N_kEn)],np.float64)
for inds,s in enumerate(smirkss):
param = ff.getParameter(smirks=s)
for indss,vals in enumerate(K_k[indparam]):
count = 0
for p in paramtype:
for a,b in zip(vals,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
while count < sum(N_kEn):
for k_ind, pos in enumerate(xyzn_En_sub):
for i,a in enumerate(pos):
e = np.float(get_energy(system, a))
E_kn[indss,count] = e
count += 1
E_knnew = np.zeros([len(K_extra[indparam]),sum(N_kEn)],np.float64)
for inds,s in enumerate(smirkss):
param = ff.getParameter(smirks=s)
for indss,vals in enumerate(K_extra[indparam]):
count = 0
for p in paramtype:
for a,b in zip(vals,p):
param[b] = str(a)
ff.setParameter(param, smirks = s)
system = ff.createSystem(topology, [mol], verbose=verbose)
while count < sum(N_kEn):
for k_ind, pos in enumerate(xyzn_En_sub):
for i,a in enumerate(pos):
e = np.float(get_energy(system, a))
E_knnew[indss,count] = e
count += 1
# Post process energy distributions to find expectation values, analytical uncertainties and bootstrapped uncertainties
#T_from_file = read_col('StateData/data.csv',["Temperature (K)"],100)
Temp_k = 300.#T_from_file
T_av = 300.#np.average(Temp_k)
nBoots = 200
beta_k = 1 / (kB*T_av)
bbeta_k = 1 / (kB*Temp_k)
#################################################################
# Compute reduced potentials
#################################################################
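# The reduced potentials assembled below are the dimensionless energies MBAR
# expects: u_kn = beta * E_kn with beta = 1/(kB*T), built from the
# bootstrap-resampled energies of both the sampled and the perturbed state.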
print "--Computing reduced potentials..."
# Initialize matrices for u_kn/observables matrices and expected value/uncertainty matrices
u_kn = np.zeros([K, sum(N_kF)], dtype=np.float64)
E_kn_samp = np.zeros([K,sum(N_kF)],np.float64)
u_knnew = np.zeros([K,sum(N_kF)], np.float64)
E_knnew_samp = np.zeros([K,sum(N_kF)], np.float64)
A_kn_samp = np.zeros([sum(N_kF)],np.float64)
A_knnew_samp = np.zeros([sum(N_kF)],np.float64)
A2_kn = np.zeros([sum(N_kF)],np.float64)
A2_knnew = np.zeros([sum(N_kF)],np.float64)
nBoots_work = nBoots + 1
allE_expect = np.zeros([K,nBoots_work], np.float64)
allA_expect = np.zeros([K,nBoots_work],np.float64)
allE2_expect = np.zeros([K,nBoots_work], np.float64)
dE_expect = np.zeros([K], np.float64)
allE_expectnew = np.zeros([K,nBoots_work], np.float64)
allE2_expectnew = np.zeros([K,nBoots_work], np.float64)
dE_expectnew = np.zeros([K], np.float64)
dA_expect = np.zeros([K],np.float64)
dA_expectnew = np.zeros([K],np.float64)
allvarA_expect_samp = np.zeros([K,nBoots_work],np.float64)
allA_expectnew = np.zeros([K,nBoots_work],np.float64)
allvarA_expectnew = np.zeros([K,nBoots_work],np.float64)
allaltvarA_expectnew = np.zeros([K,nBoots_work],np.float64)
allA_new_mean_samp = np.zeros([nBoots_work],np.float64)
# Begin bootstrapping loop
for n in range(nBoots_work):
if (n > 0):
print "Bootstrap: %d/%d" % (n,nBoots)
for k in range(K):
if N_kF[k] > 0:
if (n == 0):
booti = np.array(range(N_kF[k]))
else:
booti = np.random.randint(N_kF[k], size = N_kF[k])
E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_kn[:,booti]
E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_knnew[:,booti]
A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn[booti]
A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew[booti]
for k in range(K):
u_kn[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
u_knnew[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_kn[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_knnew[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
############################################################################
# Initialize MBAR
############################################################################
# Initialize MBAR with Newton-Raphson
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
if (n==0):
initial_f_k = None # start from zero
else:
initial_f_k = mbar.f_k # start from the previous final free energies to speed convergence
mbar = pymbar.MBAR(u_kn, N_kF, verbose=False, relative_tolerance=1e-12,initial_f_k=initial_f_k)
#------------------------------------------------------------------------
# Compute Expectations for energy and angle distributions
#------------------------------------------------------------------------
# print ""
# print "Computing Expectations for E..."
E_kn2 = u_kn # not a copy, we are going to write over it, but we don't need it any more.
E_knnew2 = u_knnew
for k in range(K):
E_kn2[k,:]*=beta_k**(-1) # get the 'unreduced' potential -- we can't take differences of reduced potentials because the beta is different.
E_knnew2[k,:]*=beta_k**(-1)
(E_expect, dE_expect) = mbar.computeExpectations(E_kn2,state_dependent = True)
(E_expectnew, dE_expectnew) = mbar.computeExpectations(E_knnew2,state_dependent = True)
(A_expect, dA_expect) = mbar.computeExpectations(A2_kn,state_dependent = False)
allE_expect[:,n] = E_expect[:]
allE_expectnew[:,n] = E_expectnew[:]
allA_expect[:,n] = A_expect[:]
# expectations for the differences, which we need for numerical derivatives
# To be used once the energy expectations are fixed
(DeltaE_expect, dDeltaE_expect) = mbar.computeExpectations(E_kn2,output='differences', state_dependent = False)
(DeltaE_expectnew, dDeltaE_expectnew) = mbar.computeExpectations(E_knnew2,output='differences', state_dependent = False)
# print "Computing Expectations for E^2..."
(E2_expect, dE2_expect) = mbar.computeExpectations(E_kn2**2, state_dependent = True)
allE2_expect[:,n] = E2_expect[:]
(A_expectnew, dA_expectnew) = mbar.computeExpectations(A2_kn,u_knnew,state_dependent=False)
allA_expectnew[:,n] = A_expectnew[:]
#Variance in sampled calculated observables (i.e. variance in bond length from state we're reweighting from) using MBAR A_expect
#for k in range(K):
# varA_expect_samp[k] = sum([(Ai - A_expect[k])**2 for Ai in A_kn_samp])/len(A_kn_samp)
#allvarA_expect_samp[:,n] = varA_expect_samp[:]
#Variance in unsampled calculated observables using MBAR
varA_mbar_feed = np.zeros([sum(N_kF)],np.float64)
for l in range(sum(N_kF)):
varA_mbar_feed[l] = ((A2_kn[l] - A_expect)**2)
(varA_expectnew,dvarA_expectnew) = mbar.computeExpectations(varA_mbar_feed,u_knnew,state_dependent=False)
allvarA_expectnew[:,n] = varA_expectnew[:]
#Check against calculating variance of A as <x^2> - <A>^2 (instead of <(x-A)^2>)
(A2_expectnew,dA2_expectnew) = mbar.computeExpectations(A2_kn**2,u_knnew,state_dependent=False)
altvarA_expectnew = (A2_expectnew[:] - A_expectnew[:]**2)
allaltvarA_expectnew[:,n] = altvarA_expectnew[:]
#Record mean of sampled observable with bootstrap randomization to get error bars
allA_new_mean_samp[n] = np.mean(A2_knnew)
N_eff = mbar.computeEffectiveSampleNumber(verbose = True)
if nBoots > 0:
A_bootnew = np.zeros([K],dtype=np.float64)
E_bootnew = np.zeros([K],dtype=np.float64)
dE_boot = np.zeros([K],dtype=np.float64)
dE_bootnew = np.zeros([K],dtype=np.float64)
dA_boot = np.zeros([K],dtype=np.float64)
dA_bootnew = np.zeros([K],dtype=np.float64)
varA_bootnew = np.zeros([K],dtype=np.float64)
altvarA_bootnew = np.zeros([K],dtype=np.float64)
dvarA_bootnew = np.zeros([K],dtype=np.float64)
altdvarA_bootnew = np.zeros([K],dtype=np.float64)
A_bootnew_samp = np.mean(allA_new_mean_samp)
dA_bootnew_samp = np.std(allA_new_mean_samp)
for k in range(K):
dE_boot[k] = np.std(allE_expect[k,1:nBoots_work])
dE_bootnew[k] = np.std(allE_expectnew[k,1:nBoots_work])
dA_boot[k] = np.std(allA_expect[k,1:nBoots_work])
dA_bootnew[k] = np.std(allA_expectnew[k,1:nBoots_work])
varA_bootnew[k] = np.average(allvarA_expectnew[k,1:nBoots_work])
altvarA_bootnew[k] = np.average(allaltvarA_expectnew[k,1:nBoots_work])
dvarA_bootnew[k] = np.std(allvarA_expectnew[k,1:nBoots_work])
altdvarA_bootnew[k] = np.std(allaltvarA_expectnew[k,1:nBoots_work])
dA_bootnew = dA_expectnew
varA_bootnew = varA_expectnew
dvarA_bootnew = dvarA_expectnew
altvarA_bootnew = altvarA_expectnew
#altdvarA_bootnew = altdvarA_expectnew
#bins1 = int(np.log2(len(allA_expectnew[0])))
#bins2 = int(np.sqrt(len(allA_expectnew[0])))
#binsnum = int((bins1+bins2)/2)
#plt.figure()
#plt.hist(allA_expectnew[0], binsnum, normed=1, facecolor='green', alpha=0.75)
#plt.xlabel('Length (A)')
#plt.ylabel('Probability')
#plt.axis([min(allA_expectnew[0])-(bins[1]-bins[0]), max(allA_expectnew[0])-(bins[1]-bins[0]), 0, bins[1]-bins[0]])
#plt.grid(True)
#plt.savefig('checkdist.png')
#print "E_expect: %s dE_expect: %s dE_boot: %s \n" % (E_expect,dE_expect,dE_boot)
#print "E_expectnew: %s dE_expectnew: %s dE_bootnew: %s \n" % (E_expectnew,dE_expectnew,dE_bootnew)
#print "delta_E_expect: %s percent_delta_E_expect: %s \n" % (E_expectnew-E_expect, 100.*(E_expectnew-E_expect)/E_expect)
#print "A_expect: %s dA_expect: %s dA_boot: %s \n" % (A_expect,dA_expect,dA_boot)
#print "A_expectnew: %s dA_expectnew: %s dA_bootnew: %s \n" % (A_expectnew,dA_expectnew,dA_bootnew)
#print "varA_bootnew (variance of MBAR A from sampled population): %s sqrt of that: %s True value: %s \n" % (varA_bootnew,varA_bootnew**0.5,np.std(A_knnew_samp)**2,)
#print "The mean of the sampled series = %s \n" % ([np.average(A) for A in A_sub])
#print "The true sampled mean of the observable we're reweighting to = %s \n" % ([np.average(A) for A in A_sub_test])
#print "The mean of the energies corresponding to the sampled series = %s \n" % ([np.average(E) for E in E_kn])
#print "The mean of the energies corresponding to the unsampled series = %s \n" % ([np.average(E) for E in E_knnew])
# calculate how many standard deviations each estimate is from the corresponding sampled value
E_mean_samp = np.array([np.average(E) for E in E_kn])
E_mean_unsamp = np.array([np.average(E) for E in E_knnew])
A_mean_samp = np.array([np.average(A) for A in A_sub])
A_mean_test = np.array([np.average(A) for A in A_sub_test])
varAnew_samp = np.array([np.std(A)**2 for A in A_sub_test])
#print varAnew_samp
#print (dA_bootnew**2)*sum(N_kEn)
E_expect_mean = np.zeros([K],dtype=np.float64)
E_expect_meannew = np.zeros([K],dtype=np.float64)
A_expect_mean_samp = np.zeros([K],dtype=np.float64)
A_expect_mean_unsamp = np.zeros([K],dtype=np.float64)
for k in range(K):
E_expect_mean[k] = np.average(allE_expect[k,1:nBoots_work])
E_expect_meannew[k] = np.average(allE_expectnew[k,1:nBoots_work])
A_expect_mean_samp[k] = np.average(allA_expect[k,1:nBoots_work])
A_expect_mean_unsamp[k] = np.average(allA_expectnew[k,1:nBoots_work])
A_expect_mean_unsamp = A_expectnew
E_expect_meannew = E_expectnew
E_samp_stddevaway = np.zeros([K],np.float64)
E_unsamp_stddevaway = np.zeros([K],np.float64)
A_samp_stddevaway = np.zeros([K],np.float64)
A_test_stddevaway = np.zeros([K],np.float64)
for k in range(K):
E_samp_stddevaway[k] = np.abs(E_mean_samp[k]-E_expect_mean[k])/dE_expect
E_unsamp_stddevaway[k] = np.abs(E_mean_unsamp[k]-E_expect_meannew[k])/dE_expectnew
A_samp_stddevaway[k] = np.abs(A_mean_samp[k]-A_expect_mean_samp[k])/dA_expect
A_test_stddevaway[k] = np.abs(A_mean_test[k]-A_expect_mean_unsamp[k])/dA_expectnew
pctshft = 100.*((np.float(K_k[indparam]) - np.float(K_extra[indparam]))/np.float(K_k[indparam]))
#print "Standard deviations away from true sampled observables for E_expect: %s E_expectnew: %s A_expect: %s A_expect_unsamp: %s" % (E_samp_stddevaway,E_unsamp_stddevaway,A_samp_stddevaway,A_test_stddevaway)
#print "Percent shift = %s \n" % pctshft
#print "Percent of molecule that is %s = %s \n" % (smirkss[0],pctdict[smirkss[0]])
allvarA_expectnew2 = np.zeros([K,nBoots_work],np.float64)
allaltvarA_expectnew2 = np.zeros([K,nBoots_work],np.float64)
for n in range(nBoots_work):
if (n > 0):
print "Bootstrap: %d/%d" % (n,nBoots)
for k in range(K):
if N_kF[k] > 0:
if (n == 0):
booti = np.array(range(N_kF[k]))
else:
booti = np.random.randint(N_kF[k], size = N_kF[k])
E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_kn[:,booti]
E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = E_knnew[:,booti]
A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn[booti]
A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew[booti]
for k in range(K):
u_kn[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_kn_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
u_knnew[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])] = beta_k * E_knnew_samp[:,sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_kn[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_kn_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
A2_knnew[sum(N_kF[0:k]):sum(N_kF[0:k+1])] = A_knnew_samp[sum(N_kF[0:k]):sum(N_kF[0:k+1])]
############################################################################
# Initialize MBAR
############################################################################
# Initialize MBAR with Newton-Raphson
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
if (n==0):
initial_f_k = None # start from zero
else:
initial_f_k = mbar.f_k # start from the previous final free energies to speed convergence
mbar = pymbar.MBAR(u_kn, N_kF, verbose=False, relative_tolerance=1e-12,initial_f_k=initial_f_k)
#Variance in unsampled calculated observables using MBAR
varA_mbar_feed2 = np.zeros([sum(N_kF)],np.float64)
for l in range(sum(N_kF)):
varA_mbar_feed2[l] = ((A2_kn[l] - A_expect_mean_unsamp[0])**2)
(varA_expectnew2,dvarA_expectnew2) = mbar.computeExpectations(varA_mbar_feed2,u_knnew,state_dependent=False)
allvarA_expectnew2[:,n] = varA_expectnew2[:]
#Check against calculating variance of A as <x^2> - <A>^2 (instead of <(x-A)^2>)
(A_expectnew2,dA_expectnew2) = mbar.computeExpectations(A2_kn,u_knnew,state_dependent=False)
(A2_expectnew2,dA2_expectnew2) = mbar.computeExpectations(A2_kn**2,u_knnew,state_dependent=False)
altvarA_expectnew2 = (A2_expectnew2[:] - A_expectnew2[:]**2)
allaltvarA_expectnew2[:,n] = altvarA_expectnew2[:]
if nBoots > 0:
varA_bootnew2 = np.zeros([K],dtype=np.float64)
altvarA_bootnew2 = np.zeros([K],dtype=np.float64)
dvarA_bootnew2 = np.zeros([K],dtype=np.float64)
altdvarA_bootnew2 = np.zeros([K],dtype=np.float64)
for k in range(K):
varA_bootnew2[k] = np.average(allvarA_expectnew2[k,1:nBoots_work])
altvarA_bootnew2[k] = np.average(allaltvarA_expectnew2[k,1:nBoots_work])
dvarA_bootnew2[k] = np.std(allvarA_expectnew2[k,1:nBoots_work])
altdvarA_bootnew2[k] = np.std(allaltvarA_expectnew2[k,1:nBoots_work])
varA_bootnew2 = varA_expectnew2
dvarA_bootnew2 = dvarA_expectnew2
altvarA_bootnew2 = altvarA_expectnew2
#altdvarA_bootnew2 = altdvarA_expectnew2
#print allvarA_expectnew2
#print np.var(allA_expectnew)
bins1 = int(np.log2(len(allvarA_expectnew2[0])))
bins2 = int(np.sqrt(len(allvarA_expectnew2[0])))
binsnum = int((bins1+bins2)/2)
plt.figure()
plt.hist(allvarA_expectnew2[0], binsnum, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('Length^2 (A^2)')
plt.ylabel('Probability')
#plt.axis([min(allA_expectnew[0])-(bins[1]-bins[0]), max(allA_expectnew[0])-(bins[1]-bins[0]), 0, bins[1]-bins[0]])
plt.grid(True)
plt.savefig('checkdist2.png')
#sys.exit()
print '###############################################################################'
molnamedf.append(mol2[ind])
smirksdf.append(smirkss[0])
obstypedf.append(obstype)
paramtypedf.append(paramtype[0])
newparamval.append(K_extra[indparam])
percentshiftdf.append(pctshft)
N_subsampleddf.append(N_kF)
E_expectdf.append(E_expect_mean)
dE_expectdf.append(dE_expect)
dE_bootdf.append(dE_boot)
E_stddevawaydf.append(E_samp_stddevaway)
Enew_expectdf.append(E_expect_meannew)
dEnew_expectdf.append(dE_expectnew)
dEnew_bootdf.append(dE_bootnew)
Enew_stddevawaydf.append(E_unsamp_stddevaway)
A_expectdf.append(A_expect_mean_samp)
dA_expectdf.append(dA_expect)
dA_bootdf.append(dA_boot)
A_stddevawaydf.append(A_samp_stddevaway)
Anew_sampleddf.append(A_mean_test)
Anew_expectdf.append(A_expect_mean_unsamp)
dAnew_expectdf.append(dA_expectnew)
dAnew_bootdf.append(dA_bootnew)
Anew_stddevawaydf.append(A_test_stddevaway)
varAnew_sampdf.append(varAnew_samp)
varAnew_bootdf.append(varA_bootnew)
altvarAnew_bootdf.append(altvarA_bootnew)
dvarAnew_bootdf.append(dvarA_bootnew)
altdvarAnew_bootdf.append(altdvarA_bootnew)
varAnew_bootdf2.append(varA_bootnew2)
altvarAnew_bootdf2.append(altvarA_bootnew2)
dvarAnew_bootdf2.append(dvarA_bootnew2)
altdvarAnew_bootdf2.append(altdvarA_bootnew2)
A_boot_new_sampdf.append(A_bootnew_samp)
dA_boot_new_sampdf.append(dA_bootnew_samp)
print("NEXT LOOP")
########################################################################
df = pd.DataFrame.from_dict({'mol_name':[value for value in molnamedf],
'smirks':[value for value in smirksdf],
'obs_type':[value for value in obstypedf],
'param_type':[value for value in paramtypedf],
'new_param':[value for value in newparamval],
'percent_shift':[value for value in percentshiftdf],
'N_subsampled':[value for value in N_subsampleddf],
'E_expect':[value for value in E_expectdf],
'dE_expect':[value for value in dE_expectdf],
'dE_boot':[value for value in dE_bootdf],
'E_stddevaway':[value for value in E_stddevawaydf],
'Enew_expect':[value for value in Enew_expectdf],
'dEnew_expect':[value for value in dEnew_expectdf],
'dEnew_boot':[value for value in dEnew_bootdf],
'Enew_stddevaway':[value for value in Enew_stddevawaydf],
'A_expect':[value for value in A_expectdf],
'dA_expect':[value for value in dA_expectdf],
'dA_boot':[value for value in dA_bootdf],
'A_stddevaway':[value for value in A_stddevawaydf],
'Anew_sampled':[value for value in Anew_sampleddf],
'Anew_expect':[value for value in Anew_expectdf],
'dAnew_expect':[value for value in dAnew_expectdf],
'dAnew_boot':[value for value in dAnew_bootdf],
'Anew_stddevaway':[value for value in Anew_stddevawaydf],
'varAnew_samp':[value for value in varAnew_sampdf],
'varAnew_boot':[value for value in varAnew_bootdf],
'altvarAnew_boot':[value for value in altvarAnew_bootdf],
'dvarAnew_boot':[value for value in dvarAnew_bootdf],
'altdvarAnew_boot':[value for value in altdvarAnew_bootdf],
'varAnew_boot2':[value for value in varAnew_bootdf2],
'altvarAnew_boot2':[value for value in altvarAnew_bootdf2],
'dvarAnew_boot2':[value for value in dvarAnew_bootdf2],
'altdvarAnew_boot2':[value for value in altdvarAnew_bootdf2],
'A_boot_new_samp':[value for value in A_boot_new_sampdf],
'dA_boot_new_samp':[value for value in dA_boot_new_sampdf]})
df.to_csv('mbar_analyses/mbar_analysis_'+sys.argv[1]+'_'+smirkss[0]+'_'+paramtype[0][0]+'_'+obstype+'.csv',sep=';')
df.to_pickle('mbar_analyses/mbar_analysis_'+sys.argv[1]+'_'+smirkss[0]+'_'+paramtype[0][0]+'_'+obstype+'.pkl')
| mit |
xunyou/vincent | examples/grouped_bar_examples.py | 11 | 2923 | # -*- coding: utf-8 -*-
"""
Vincent Grouped Bar Examples
"""
#Build a Grouped Bar Chart from scratch
import pandas as pd
from vincent import *
from vincent.core import KeyedList
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 40, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
farm_5 = {'apples': 20, 'berries': 35, 'squash': 19, 'melons': 17, 'corn': 19}
farm_6 = {'apples': 3, 'berries': 28, 'squash': 21, 'melons': 11, 'corn': 23}
data = [farm_1, farm_2, farm_3, farm_4, farm_5, farm_6]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4', 'Farm 5', 'Farm 6']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.padding = {'top': 10, 'left': 50, 'bottom': 50, 'right': 100}
data = Data.from_pandas(df, grouped=True)
vis.data['table'] = data
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"),
padding=0.2)
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
enter_props = PropertySet(x=ValueRef(scale='pos', field="data.group"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='pos', band=True, offset=-1),
y2=ValueRef(value=0, scale='y'),
fill=ValueRef(scale='color', field='data.col'))
#Facet the data by farm and reference the faceted table from the group mark
facet = Transform(type='facet', keys=['data.idx'])
transform = MarkRef(data='table',transform=[facet])
mark = Mark(type='group', from_=transform,
marks=[Mark(type='rect',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
#Mark group properties
group_props = PropertySet(x=ValueRef(scale='x', field="key"),
width=ValueRef(scale='x', band=True))
vis.marks[0].properties = MarkProperties(enter=group_props)
vis.marks[0].scales = KeyedList()
vis.marks[0].scales['pos'] = Scale(name='pos', type='ordinal',
range='width',
domain=DataRef(field='data.group'))
vis.axis_titles(x='Farms', y='Total Produce')
vis.legend(title='Produce Type')
vis.to_json('vega.json')
#Convenience method
vis = GroupedBar(df)
vis.axis_titles(x='Farms', y='Total Produce')
vis.width = 700
vis.legend(title='Produce Type')
vis.colors(brew='Pastel1')
vis.to_json('vega.json')
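#A follow-on sketch using only the calls shown above: the same grouped bar
#chart with a different ColorBrewer palette, written to a second spec file.
#vis2 = GroupedBar(df)
#vis2.axis_titles(x='Farms', y='Total Produce')
#vis2.legend(title='Produce Type')
#vis2.colors(brew='Set2')
#vis2.to_json('vega_grouped_set2.json')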
| mit |
anurag313/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that no other thread is
# calling this wrapper, and hence `srand`, concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
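# In brief, the precomputed-kernel pattern exercised above (shapes only, not
# an additional assertion):
# K_train = np.dot(X_train, X_train.T) # (n_train, n_train), passed to fit
# K_test = np.dot(X_test, X_train.T) # (n_test, n_train), passed to predict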
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape of the decision function when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable, and remove half of the samples from the
# last class to make it imbalanced.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
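# Illustrative sketch (not part of the upstream test suite): the behaviour exercised
# above can be reproduced directly -- `probability=True` must be set before `fit`
# for `predict_proba` to be usable. The helper name below is made up for illustration.
def _predict_proba_availability_sketch():
    clf = svm.SVC(probability=True).fit(iris.data, iris.target)
    return clf.predict_proba(iris.data[:5])  # each row sums to ~1 over the classes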
| bsd-3-clause |
0x0all/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.py | 7 | 17439 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
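# Minimal sketch (not part of the original script; it mirrors how LEARNING_RATE_SCHEDULE
# is consumed in the training loop further down): the rate set at a given key stays in
# effect until the next key is reached. The helper name is made up for illustration.
def _learning_rate_for_chunk(chunk_index, schedule=LEARNING_RATE_SCHEDULE):
    applicable = [k for k in schedule if k <= chunk_index]
    return schedule[max(applicable)] if applicable else schedule[min(schedule)]
# e.g. _learning_rate_for_chunk(0) == 0.04 and _learning_rate_for_chunk(2000) == 0.004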
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
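# Illustrative check (not in the original script): the transpose above maps batches
# from (N, H, W, C) to the (N, C, H, W) layout expected by the convolutional layers.
assert np.zeros((2, 69, 69, 3)).transpose(0, 3, 1, 2).shape == (2, 3, 69, 69)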
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
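# Equivalent formulation (illustrative only, not a change to the pipeline):
# all_predictions = np.clip(all_predictions, 0.0, 1.0)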
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
cowlicks/numpy | numpy/lib/function_base.py | 4 | 129914 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
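# Illustrative usage sketch (not part of the library module): with ``density=True``
# the histogram integrates to one over the binned range. The helper name is made up.
def _histogram_density_sketch():
    counts, edges = histogram([1.0, 2.0, 2.5, 4.0], bins=4, density=True)
    return np.sum(counts * np.diff(edges))  # == 1.0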
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
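# Illustrative sketch (not part of the library module): for 1-D input the weighted
# average reduces to a normalised dot product. The helper name is made up.
def _average_weighted_sketch():
    a = np.array([1.0, 2.0, 3.0])
    w = np.array([3.0, 1.0, 1.0])
    assert np.allclose(average(a, weights=w), np.dot(a, w) / w.sum())
    return average(a, weights=w)  # 1.6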
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
    Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
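# Illustrative sketch (not part of the library module): when funclist has one more
# entry than condlist, the extra entry is used wherever no condition holds.
def _piecewise_default_sketch():
    x = np.array([-2.0, 0.0, 2.0])
    return piecewise(x, [x < 0, x > 0], [-1, 1, 0])  # -> array([-1., 0., 1.])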
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
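# Illustrative sketch (not part of the library module): sampling y = x**2 on an
# integer grid, the interior central differences are exact while the edges use
# one-sided first-order differences by default.
def _gradient_sketch():
    y = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
    return gradient(y)  # -> array([ 1., 2., 4., 6., 7.])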
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
.. versionadded:: 1.10.0
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
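        # wrap one point from each end around the period so that values of
        # `x` near the boundary interpolate between the last and first samples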
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
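    # wrap the deltas into [-pi, pi) and build the phase correction;
    # corrections for jumps smaller than `discont` are zeroed out below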
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
    `condition` is boolean, ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    Support for the new keyword-argument interface and for the `excluded`
    argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
fweights : array_like, int, optional
.. versionadded:: 1.10
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights : array_like, optional
.. versionadded:: 1.10
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
    >>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
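    # subtract the (weighted) mean of each variable, then form the
    # normalized (weighted) outer product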
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
.. deprecated:: 1.10.0
        Has no effect, do not use.
ddof : _NoValue, optional
.. deprecated:: 1.10.0
        Has no effect, do not use.
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
        warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
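    # Clenshaw recurrence for evaluating a Chebyshev series; `vals` holds
    # the coefficients from the cephes tables above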
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
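    # I0 is an even function, so negative arguments can be folded onto the
    # positive half-axis before choosing a Chebyshev expansion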
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
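    # substitute a tiny value for x == 0 so that sin(y)/y evaluates to 1.0
    # (the limit of sinc at zero) without dividing by zero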
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
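    Examples
    --------
    A minimal illustration (each column is sorted independently, since the
    sort runs along the first axis):
    >>> a = np.array([[3, 1], [2, 4]])
    >>> np.msort(a)
    array([[2, 1],
           [3, 4]])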
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
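        # reshape the weights so they broadcast only along the reduction axis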
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule: y-axis locations of the
    points are taken from the `y` array, and by default the x-axis distance
    between points is 1.0; alternatively the spacing can be provided with the
    `x` array or the `dx` scalar. The return value equals the combined area
    under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
# always succeeds (add_newdoc swallows all errors)
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
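# Illustrative, hypothetical usage of add_newdoc (a sketch only; the target
# names below are placeholders and, as documented above, failures are silent):
#
#     add_newdoc('numpy.core.multiarray', 'some_object',
#                "Docstring attached to some_object.")
#     add_newdoc('numpy.core.multiarray', 'some_object',
#                ('some_method', "Docstring for some_object.some_method."))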
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
# it works likes the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
jzt5132/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
rupak0577/ginga | ginga/qtw/Plot.py | 4 | 1374 | #
# Plot.py -- Plotting widget canvas wrapper.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# GUI imports
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp, Widgets
from ginga.toolkit import toolkit
if toolkit == 'qt5':
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg \
as FigureCanvas
else:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg \
as FigureCanvas
class PlotWidget(Widgets.WidgetBase):
def __init__(self, plot, width=500, height=500):
super(PlotWidget, self).__init__()
self.widget = FigureCanvas(plot.get_figure())
self.widget._resizeEvent = self.widget.resizeEvent
self.widget.resizeEvent = self.resize_event
self.plot = plot
def configure_window(self, wd, ht):
fig = self.plot.get_figure()
fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)
def resize_event(self, event):
rect = self.widget.geometry()
x1, y1, x2, y2 = rect.getCoords()
width = x2 - x1
height = y2 - y1
if width > 0 and height > 0:
self.configure_window(width, height)
self.widget._resizeEvent(event)
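    # Minimal usage sketch (illustrative only): `plot` can be any object whose
    # get_figure() returns a matplotlib Figure, which is all PlotWidget needs.
    #
    #     widget = PlotWidget(plot, width=500, height=500)
    #     some_qt_layout.addWidget(widget.widget)  # widget.widget is the FigureCanvas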
#END
| bsd-3-clause |
sankar-mukherjee/CoFee | scikit_algo/CART.py | 1 | 2044 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 04 14:54:02 2015
@author: mukherjee
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, metrics, cross_validation
from sklearn import tree
# read Form data
DATA_FORM_FILE = 'all-merged-cat.csv'
rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108])
#select features
posfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[0:8,10:23]].astype(float)
# select test labels
#Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float)
label = pd.DataFrame.as_matrix(rawdata)[:,29]
#remove bad features as there is no label
scale = np.where(label == 'None')
label = np.delete(label,scale)
posfeat = np.delete(posfeat,scale,0)
# Transforming categorical feature
le = preprocessing.LabelEncoder()
le.fit(label)
list(le.classes_)
label = le.transform(label)
# create traning and test data by partioning
nSamples = len(posfeat)
nTrain = int(0.7 * nSamples)
XtrainPos = posfeat[:nTrain, :]
YtrainPos = label[:nTrain]
XtestPos = posfeat[nTrain:, :]
YtestPos = label[nTrain:]
#normalization of features
#scale = preprocessing.StandardScaler().fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#scale = preprocessing.MinMaxScaler()
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#
#scale = preprocessing.Normalizer().fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#classification
clf = tree.DecisionTreeClassifier()
clf = clf.fit(XtrainPos, YtrainPos)
print(metrics.classification_report(YtestPos, clf.predict(XtestPos)))
## Crossvalidation 5 times using different split
#scores = cross_validation.cross_val_score(clf_svm, posfeat, label, cv=5, scoring='f1')
#print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# Visualization
#plt.hist(XtrainPos[:,0])
#plt.show()
| apache-2.0 |
pratapvardhan/scikit-image | doc/ext/plot2rst.py | 21 | 20507 | """
Example generation from python files.
Generate the rst files for the examples by iterating over the python
example files. Files that generate images should start with 'plot'.
To generate your own examples, add this extension to the list of
``extensions``in your Sphinx configuration file. In addition, make sure the
example directory(ies) in `plot2rst_paths` (see below) points to a directory
with examples named `plot_*.py` and include an `index.rst` file.
This code was adapted from scikit-image, which took it from scikit-learn.
Options
-------
The ``plot2rst`` extension accepts the following options:
plot2rst_paths : length-2 tuple, or list of tuples
Tuple or list of tuples of paths to (python plot, generated rst) files,
i.e. (source, destination). Note that both paths are relative to Sphinx
'source' directory. Defaults to ('../examples', 'auto_examples')
plot2rst_rcparams : dict
Matplotlib configuration parameters. See
http://matplotlib.sourceforge.net/users/customizing.html for details.
plot2rst_default_thumb : str
Path (relative to doc root) of default thumbnail image.
plot2rst_thumb_shape : tuple of int
    Shape of the thumbnail in pixels. The image is resized to fit within this
    shape and the excess is filled with white pixels. This fixed size ensures
    that gallery images are displayed in a grid.
plot2rst_plot_tag : str
When this tag is found in the example file, the current plot is saved and
tag is replaced with plot path. Defaults to 'PLOT2RST.current_figure'.
Suggested CSS definitions
-------------------------
div.body h2 {
border-bottom: 1px solid #BBB;
clear: left;
}
/*---- example gallery ----*/
.gallery.figure {
float: left;
margin: 1em;
}
.gallery.figure img{
display: block;
margin-left: auto;
margin-right: auto;
width: 200px;
}
.gallery.figure .caption {
width: 200px;
text-align: center !important;
}
"""
import os
import re
import shutil
import token
import tokenize
import traceback
import itertools
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
from skimage import transform
from skimage.util.dtype import dtype_range
from notebook_doc import Notebook
from docutils.core import publish_parts
from sphinx.domains.python import PythonDomain
LITERALINCLUDE = """
.. literalinclude:: {src_name}
:lines: {code_start}-
"""
CODE_LINK = """
**Python source code:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
NOTEBOOK_LINK = """
**IPython Notebook:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
TOCTREE_TEMPLATE = """
.. toctree::
:hidden:
%s
"""
IMAGE_TEMPLATE = """
.. image:: images/%s
:align: center
"""
GALLERY_IMAGE_TEMPLATE = """
.. figure:: %(thumb)s
:figclass: gallery
:target: ./%(source)s.html
:ref:`example_%(link_name)s`
"""
class Path(str):
"""Path object for manipulating directory and file paths."""
def __new__(self, path):
return str.__new__(self, path)
@property
def isdir(self):
return os.path.isdir(self)
@property
def exists(self):
"""Return True if path exists"""
return os.path.exists(self)
def pjoin(self, *args):
"""Join paths. `p` prefix prevents confusion with string method."""
return self.__class__(os.path.join(self, *args))
def psplit(self):
"""Split paths. `p` prefix prevents confusion with string method."""
return [self.__class__(p) for p in os.path.split(self)]
def makedirs(self):
if not self.exists:
os.makedirs(self)
def listdir(self):
return os.listdir(self)
def format(self, *args, **kwargs):
return self.__class__(super(Path, self).format(*args, **kwargs))
def __add__(self, other):
return self.__class__(super(Path, self).__add__(other))
def __iadd__(self, other):
return self.__add__(other)
def setup(app):
app.connect('builder-inited', generate_example_galleries)
app.add_config_value('plot2rst_paths',
('../examples', 'auto_examples'), True)
app.add_config_value('plot2rst_rcparams', {}, True)
app.add_config_value('plot2rst_default_thumb', None, True)
app.add_config_value('plot2rst_thumb_shape', (250, 300), True)
app.add_config_value('plot2rst_plot_tag', 'PLOT2RST.current_figure', True)
app.add_config_value('plot2rst_index_name', 'index', True)
def generate_example_galleries(app):
cfg = app.builder.config
if isinstance(cfg.source_suffix, list):
cfg.source_suffix_str = cfg.source_suffix[0]
else:
cfg.source_suffix_str = cfg.source_suffix
doc_src = Path(os.path.abspath(app.builder.srcdir)) # path/to/doc/source
if isinstance(cfg.plot2rst_paths, tuple):
cfg.plot2rst_paths = [cfg.plot2rst_paths]
for src_dest in cfg.plot2rst_paths:
plot_path, rst_path = [Path(p) for p in src_dest]
example_dir = doc_src.pjoin(plot_path)
rst_dir = doc_src.pjoin(rst_path)
generate_examples_and_gallery(example_dir, rst_dir, cfg)
def generate_examples_and_gallery(example_dir, rst_dir, cfg):
"""Generate rst from examples and create gallery to showcase examples."""
if not example_dir.exists:
print("No example directory found at", example_dir)
return
rst_dir.makedirs()
# we create an index.rst with all examples
with open(rst_dir.pjoin('index'+cfg.source_suffix_str), 'w') as gallery_index:
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
write_gallery(gallery_index, example_dir, rst_dir, cfg)
for d in sorted(example_dir.listdir()):
example_sub = example_dir.pjoin(d)
if example_sub.isdir:
rst_sub = rst_dir.pjoin(d)
rst_sub.makedirs()
write_gallery(gallery_index, example_sub, rst_sub, cfg, depth=1)
gallery_index.flush()
def write_gallery(gallery_index, src_dir, rst_dir, cfg, depth=0):
"""Generate the rst files for an example directory, i.e. gallery.
Write rst files from python examples and add example links to gallery.
Parameters
----------
gallery_index : file
Index file for plot gallery.
src_dir : 'str'
Source directory for python examples.
rst_dir : 'str'
Destination directory for rst files generated from python examples.
cfg : config object
Sphinx config object created by Sphinx.
"""
index_name = cfg.plot2rst_index_name + cfg.source_suffix_str
gallery_template = src_dir.pjoin(index_name)
if not os.path.exists(gallery_template):
print(src_dir)
print(80*'_')
print('Example directory %s does not have a %s file'
% (src_dir, index_name))
print('Skipping this directory')
print(80*'_')
return
with open(gallery_template) as f:
gallery_description = f.read()
gallery_index.write('\n\n%s\n\n' % gallery_description)
rst_dir.makedirs()
examples = [fname for fname in sorted(src_dir.listdir(), key=_plots_first)
if fname.endswith('py')]
ex_names = [ex[:-3] for ex in examples] # strip '.py' extension
if depth == 0:
sub_dir = Path('')
else:
sub_dir_list = src_dir.psplit()[-depth:]
sub_dir = Path('/'.join(sub_dir_list) + '/')
joiner = '\n %s' % sub_dir
gallery_index.write(TOCTREE_TEMPLATE % (sub_dir + joiner.join(ex_names)))
for src_name in examples:
try:
write_example(src_name, src_dir, rst_dir, cfg)
except Exception:
print("Exception raised while running:")
print("%s in %s" % (src_name, src_dir))
print('~' * 60)
traceback.print_exc()
print('~' * 60)
continue
link_name = sub_dir.pjoin(src_name)
link_name = link_name.replace(os.path.sep, '_')
if link_name.startswith('._'):
link_name = link_name[2:]
info = {}
info['thumb'] = sub_dir.pjoin('images/thumb', src_name[:-3] + '.png')
info['source'] = sub_dir + src_name[:-3]
info['link_name'] = link_name
gallery_index.write(GALLERY_IMAGE_TEMPLATE % info)
def _plots_first(fname):
"""Decorate filename so that examples with plots are displayed first."""
if not (fname.startswith('plot') and fname.endswith('.py')):
return 'zz' + fname
return fname
def write_example(src_name, src_dir, rst_dir, cfg):
"""Write rst file from a given python example.
Parameters
----------
src_name : str
Name of example file.
src_dir : 'str'
Source directory for python examples.
rst_dir : 'str'
Destination directory for rst files generated from python examples.
cfg : config object
Sphinx config object created by Sphinx.
"""
last_dir = src_dir.psplit()[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = Path('')
else:
last_dir += '_'
src_path = src_dir.pjoin(src_name)
example_file = rst_dir.pjoin(src_name)
shutil.copyfile(src_path, example_file)
image_dir = rst_dir.pjoin('images')
thumb_dir = image_dir.pjoin('thumb')
notebook_dir = rst_dir.pjoin('notebook')
image_dir.makedirs()
thumb_dir.makedirs()
notebook_dir.makedirs()
base_image_name = os.path.splitext(src_name)[0]
image_path = image_dir.pjoin(base_image_name + '_{0}.png')
basename, py_ext = os.path.splitext(src_name)
rst_path = rst_dir.pjoin(basename + cfg.source_suffix_str)
notebook_path = notebook_dir.pjoin(basename + '.ipynb')
if _plots_are_current(src_path, image_path) and rst_path.exists and \
notebook_path.exists:
return
print('plot2rst: %s' % basename)
blocks = split_code_and_text_blocks(example_file)
if blocks[0][2].startswith('#!'):
blocks.pop(0) # don't add shebang line to rst file.
rst_link = '.. _example_%s:\n\n' % (last_dir + src_name)
figure_list, rst = process_blocks(blocks, src_path, image_path, cfg)
has_inline_plots = any(cfg.plot2rst_plot_tag in b[2] for b in blocks)
if has_inline_plots:
example_rst = ''.join([rst_link, rst])
else:
# print first block of text, display all plots, then display code.
first_text_block = [b for b in blocks if b[0] == 'text'][0]
label, (start, end), content = first_text_block
figure_list = save_all_figures(image_path)
rst_blocks = [IMAGE_TEMPLATE % f.lstrip('/') for f in figure_list]
example_rst = rst_link
example_rst += eval(content)
example_rst += ''.join(rst_blocks)
code_info = dict(src_name=src_name, code_start=end)
example_rst += LITERALINCLUDE.format(**code_info)
example_rst += CODE_LINK.format(src_name)
ipnotebook_name = src_name.replace('.py', '.ipynb')
ipnotebook_name = './notebook/' + ipnotebook_name
example_rst += NOTEBOOK_LINK.format(ipnotebook_name)
with open(rst_path, 'w') as f:
f.write(example_rst)
thumb_path = thumb_dir.pjoin(src_name[:-3] + '.png')
first_image_file = image_dir.pjoin(figure_list[0].lstrip('/'))
if first_image_file.exists:
first_image = io.imread(first_image_file)
save_thumbnail(first_image, thumb_path, cfg.plot2rst_thumb_shape)
if not thumb_path.exists:
if cfg.plot2rst_default_thumb is None:
print("WARNING: No plots found and default thumbnail not defined.")
print("Specify 'plot2rst_default_thumb' in Sphinx config file.")
else:
shutil.copy(cfg.plot2rst_default_thumb, thumb_path)
# Export example to IPython notebook
nb = Notebook()
# Add sphinx roles to the examples, otherwise docutils
# cannot compile the ReST for the notebook
sphinx_roles = PythonDomain.roles.keys()
preamble = '\n'.join('.. role:: py:{0}(literal)\n'.format(role)
for role in sphinx_roles)
# Grab all references to inject them in cells where needed
    ref_regexp = re.compile(r'\n(\.\. \[(\d+)\].*(?:\n[ ]{7,8}.*)+)')
math_role_regexp = re.compile(':math:`(.*?)`')
text = '\n'.join((content for (cell_type, _, content) in blocks
if cell_type != 'code'))
references = re.findall(ref_regexp, text)
for (cell_type, _, content) in blocks:
if cell_type == 'code':
nb.add_cell(content, cell_type='code')
else:
if content.startswith('r'):
content = content.replace('r"""', '')
escaped = False
else:
content = content.replace('"""', '')
escaped = True
if not escaped:
content = content.replace("\\", "\\\\")
content = content.replace('.. seealso::', '**See also:**')
content = re.sub(math_role_regexp, r'$\1$', content)
# Remove math directive when rendering notebooks
# until we implement a smarter way of capturing and replacing
# its content
content = content.replace('.. math::', '')
if not content.strip():
continue
content = (preamble + content).rstrip('\n')
content = '\n'.join([line for line in content.split('\n') if
not line.startswith('.. image')])
# Remove reference links until we can figure out a better way to
# preserve them
for (reference, ref_id) in references:
ref_tag = '[{0}]_'.format(ref_id)
if ref_tag in content:
content = content.replace(ref_tag, ref_tag[:-1])
html = publish_parts(content, writer_name='html')['html_body']
nb.add_cell(html, cell_type='markdown')
with open(notebook_path, 'w') as f:
f.write(nb.json())
def save_thumbnail(image, thumb_path, shape):
"""Save image as a thumbnail with the specified shape.
The image is first resized to fit within the specified shape and then
centered in an array of the specified shape before saving.
"""
rescale = min(float(w_1) / w_2 for w_1, w_2 in zip(shape, image.shape))
small_shape = (rescale * np.asarray(image.shape[:2])).astype(int)
small_image = transform.resize(image, small_shape)
if len(image.shape) == 3:
shape = shape + (image.shape[2],)
background_value = dtype_range[small_image.dtype.type][1]
thumb = background_value * np.ones(shape, dtype=small_image.dtype)
i = (shape[0] - small_shape[0]) // 2
j = (shape[1] - small_shape[1]) // 2
thumb[i:i+small_shape[0], j:j+small_shape[1]] = small_image
io.imsave(thumb_path, thumb)
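# Sketch of the behaviour above (illustrative numbers): a 100x400 image saved
# into shape (250, 300) is rescaled by min(250/100, 300/400) = 0.75 to 75x300,
# then centred on a white 250x300 canvas before being written to thumb_path.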
def _plots_are_current(src_path, image_path):
first_image_file = Path(image_path.format(1))
needs_replot = (not first_image_file.exists or
_mod_time(first_image_file) <= _mod_time(src_path))
return not needs_replot
def _mod_time(file_path):
return os.stat(file_path).st_mtime
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, (start, end+1), content)
List where each element is a tuple with the label ('text' or 'code'),
the (start, end+1) line numbers, and content string of block.
"""
block_edges, idx_first_text_block = get_block_edges(source_file)
with open(source_file) as f:
source_lines = f.readlines()
# Every other block should be a text block
idx_text_block = np.arange(idx_first_text_block, len(block_edges), 2)
blocks = []
slice_ranges = zip(block_edges[:-1], block_edges[1:])
for i, (start, end) in enumerate(slice_ranges):
block_label = 'text' if i in idx_text_block else 'code'
# subtract 1 from indices b/c line numbers start at 1, not 0
content = ''.join(source_lines[start-1:end-1])
blocks.append((block_label, (start, end), content))
return blocks
def get_block_edges(source_file):
"""Return starting line numbers of code and text blocks
Returns
-------
block_edges : list of int
Line number for the start of each block. Note the
idx_first_text_block : {0 | 1}
0 if first block is text then, else 1 (second block better be text).
"""
block_edges = []
with open(source_file) as f:
token_iter = tokenize.generate_tokens(f.readline)
for token_tuple in token_iter:
t_id, t_str, (srow, scol), (erow, ecol), src_line = token_tuple
if (token.tok_name[t_id] == 'STRING' and scol == 0):
# Add one point to line after text (for later slicing)
block_edges.extend((srow, erow+1))
idx_first_text_block = 0
# when example doesn't start with text block.
if not block_edges[0] == 1:
block_edges.insert(0, 1)
idx_first_text_block = 1
# when example doesn't end with text block.
if not block_edges[-1] == erow: # iffy: I'm using end state of loop
block_edges.append(erow)
return block_edges, idx_first_text_block
def process_blocks(blocks, src_path, image_path, cfg):
"""Run source, save plots as images, and convert blocks to rst.
Parameters
----------
blocks : list of block tuples
Code and text blocks from example. See `split_code_and_text_blocks`.
src_path : str
Path to example file.
image_path : str
Path where plots are saved (format string which accepts figure number).
cfg : config object
Sphinx config object created by Sphinx.
Returns
-------
figure_list : list
List of figure names saved by the example.
rst_text : str
Text with code wrapped code-block directives.
"""
src_dir, src_name = src_path.psplit()
if not src_name.startswith('plot'):
return [], ''
# index of blocks which have inline plots
inline_tag = cfg.plot2rst_plot_tag
idx_inline_plot = [i for i, b in enumerate(blocks)
if inline_tag in b[2]]
image_dir, image_fmt_str = image_path.psplit()
figure_list = []
plt.rcdefaults()
plt.rcParams.update(cfg.plot2rst_rcparams)
plt.close('all')
example_globals = {}
rst_blocks = []
fig_num = 1
for i, (blabel, brange, bcontent) in enumerate(blocks):
if blabel == 'code':
exec(bcontent, example_globals)
rst_blocks.append(codestr2rst(bcontent))
else:
if i in idx_inline_plot:
plt.savefig(image_path.format(fig_num))
figure_name = image_fmt_str.format(fig_num)
fig_num += 1
figure_list.append(figure_name)
figure_link = os.path.join('images', figure_name)
bcontent = bcontent.replace(inline_tag, figure_link)
rst_blocks.append(docstr2rst(bcontent))
return figure_list, '\n'.join(rst_blocks)
def codestr2rst(codestr):
"""Return reStructuredText code block from code string"""
code_directive = ".. code-block:: python\n\n"
indented_block = '\t' + codestr.replace('\n', '\n\t')
return code_directive + indented_block
def docstr2rst(docstr):
"""Return reStructuredText from docstring"""
idx_whitespace = len(docstr.rstrip()) - len(docstr)
whitespace = docstr[idx_whitespace:]
return eval(docstr) + whitespace
def save_all_figures(image_path):
"""Save all matplotlib figures.
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number).
"""
figure_list = []
image_dir, image_fmt_str = image_path.psplit()
fig_mngr = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_num in (m.num for m in fig_mngr):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path.format(fig_num))
figure_list.append(image_fmt_str.format(fig_num))
return figure_list
| bsd-3-clause |
andaag/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
kaichogami/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
michigraber/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
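    # Note (sketch of the math above): `u` is log|Sigma_k| computed from the
    # per-class scalings, and `norm2` is the squared Mahalanobis distance, so the
    # returned score is log P(x|k) + log P(k) up to an additive constant:
    #     -0.5 * (d_Mahalanobis^2 + log|Sigma_k|) + log(prior_k)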
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_mfcc_51/src/evaluation.py | 56 | 43426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
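    # Illustrative example (not part of the class): with labels=['home', 'office'],
    # y_true=['home', 'home', 'office'] and y_pred=['home', 'office', 'office'] the
    # confusion matrix is [[1, 1], [0, 1]], giving per-class accuracies [0.5, 1.0].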
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
max = 0
for event in data:
if event['event_offset'] > max:
max = event['event_offset']
return max
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
        event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, number of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
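    # Illustrative sketch of list_to_roll (hypothetical event, not from the
    # original source): with class_list = ['speech', 'car'] and the single event
    #   {'event_label': 'speech', 'event_onset': 0.25, 'event_offset': 0.50}
    # at time_resolution=0.1, the onset frame is floor(0.25 / 0.1) = 2 and the
    # offset frame is ceil(0.50 / 0.1) + 1 = 6, so event_roll[2:6, 0] is set to 1.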
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
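            # Descriptive note (added for clarity, not in the original source):
            # substitutions S pair a false positive with a false negative in the
            # same segment, deletions D are reference events with no system
            # counterpart, insertions I are surplus system events, and the
            # per-segment error count satisfies ER = max(Nref, Nsys) - Ntp = S + D + I.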
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
sys_leftover = numpy.nonzero(numpy.negative(sys_correct))[0]
ref_leftover = numpy.nonzero(numpy.negative(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
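        # Descriptive note (added for clarity, not in the original source):
        # system events that are neither correct nor substitutions count as
        # false positives, and reference events that are neither matched nor
        # substituted count as false negatives.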
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event onsets have to be to be considered a match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event offsets have to be to be considered a match. In seconds.
(Default value = 0.2)
        percentage_of_length : float in [0, 1]
            Offsets are also accepted if they deviate by at most percentage_of_length times the annotated event length.
            (Default value = 0.5)
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
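    # Numeric sketch (hypothetical values, not from the original source): for an
    # annotated event spanning 1.0-3.0 s (length 2.0 s) and t_collar=0.2, the
    # allowed offset deviation is max(0.2, 0.5 * 2.0) = 1.0 s, so any system
    # event ending between 2.0 s and 4.0 s satisfies the offset condition.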
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
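        # Descriptive note (added for clarity, not in the original source):
        # substitutions are counted by pairing false positives with false
        # negatives, Nsubs = min(Nfp, Nfn), and AEER below is
        # (Nfn + Nfp + Nsubs) / Nref.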
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
cbonnett/SkyNet_wrapper | src/test.py | 1 | 1208 | from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.utils import shuffle
from SkyNet import SkyNetRegressor
from SkyNet import SkyNetClassifier
try:
import seaborn as sns
except:
pass
X,y = shuffle(load_boston().data,load_boston().target)
X_train = X[0:200]
y_train = y[0:200]
X_valid = X[200:400]
y_valid = y[200:400]
X_test =X[400:]
y_test =y[400:]
sn_reg = SkyNetRegressor(id='identification', n_jobs = 4,
activation = (1,2,3,0),
layers = (2, 2, 2),
max_iter = 500,
iteration_print_frequency = 1)
sn_reg.fit(X_train,y_train,X_valid,y_valid)
# X_class,y_class = shuffle(load_iris().data, load_iris().target)
#
# X_train = X_class[0:70]
# y_train = y_class[0:70]
#
# X_valid = X_class[70:100]
# y_valid = y_class[70:100]
#
# X_test =X_class[100:]
# y_test =y_class[100:]
#
# sn_cla = SkyNetClassifier(id = 'identification', n_jobs = 1, activation = (3,3,3,0), layers = (5,5,5), max_iter = 400,
# iteration_print_frequency = 1)
#
# sn_cla.fit(X_train, y_train, X_valid, y_valid)
#
# test_yhat = sn_cla.predict_proba(X_test) | gpl-3.0 |
bikong2/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
    - mix of label indicator matrix and anything else,
      because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
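# Illustrative usage sketch (hypothetical estimator and helper names, not part
# of this module):
#
#     class MyClassifier(object):
#         def partial_fit(self, X, y, classes=None):
#             if _check_partial_fit_first_call(self, classes):
#                 # First call: self.classes_ has just been set from `classes`.
#                 self._allocate_state(n_classes=len(self.classes_))
#             # ... update the model with this mini-batch ...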
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
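# Illustrative sketch (hypothetical toy input, not from the original source):
# for a dense multioutput target
#     y = np.array([[1, 0], [2, 0], [2, 1]])
# class_distribution(y) reports classes [1, 2] with priors [1/3, 2/3] for the
# first output column and classes [0, 1] with priors [2/3, 1/3] for the second.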
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/utils/deprecation.py | 77 | 2417 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
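# Illustrative usage sketch (hypothetical names, not part of this module):
#
#     @deprecated("use new_function instead")
#     def old_function(x):
#         """Original docstring."""
#         return new_function(x)
#
# Calling old_function afterwards emits a DeprecationWarning whose message ends
# with the extra text, and its docstring becomes
# "DEPRECATED: use new_function instead\n\nOriginal docstring.".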
| bsd-3-clause |
jbagd/jupyter-tips-and-tricks | notebooks/04-More_basics.py | 4 | 4958 |
# coding: utf-8
# # Jupyter Notebook Basics
# In[1]:
names = ['alice', 'jonathan', 'bobby']
ages = [24, 32, 45]
ranks = ['kinda cool', 'really cool', 'insanely cool']
# In[3]:
for (name, age, rank) in zip(names, ages, ranks):
print name, age, rank
# In[4]:
for index, (name, age, rank) in enumerate(zip(names, ages, ranks)):
print index, name, age, rank
# In[5]:
# return, esc, shift+enter, ctrl+enter
# text keyboard shortcuts -- cmd > (right), < left,
# option delete (deletes words)
# type "h" for help
# tab
# shift-tab
# keyboard shortcuts
# - a, b, y, m, dd, h, ctrl+shift+-
# In[14]:
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u"config InlineBackend.figure_format='retina'")
import matplotlib.pyplot as plt
# no pylab
import seaborn as sns
sns.set_context('talk')
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import numpy as np
# don't do `from numpy import *`
import pandas as pd
# In[9]:
# If you have a specific function that you'd like to import
from numpy.random import randn
# In[10]:
x = np.arange(100)
y = np.sin(x)
plt.plot(x, y)#;
# In[12]:
get_ipython().magic(u'matplotlib notebook')
# In[13]:
x = np.arange(10)
y = np.sin(x)
plt.plot(x, y)#;
# ## Magics!
#
# - % and %% magics
# - interact
# - embed image
# - embed links, youtube
# - link notebooks
# Check out http://matplotlib.org/gallery.html select your favorite.
# In[15]:
get_ipython().run_cell_magic(u'bash', u'', u'for num in {1..5}\ndo\n for infile in *;\n do\n echo $num $infile\n done\n wc $infile\ndone')
# In[20]:
print "hi"
get_ipython().system(u'pwd')
# In[17]:
get_ipython().system(u'ping google.com')
# In[18]:
this_is_magic = "Can you believe you can pass variables and strings like this?"
# In[22]:
hey = get_ipython().getoutput(u'echo $this_is_magic')
# In[23]:
hey
# # Numpy
#
# If you have arrays of numbers, use `numpy` or `pandas` (built on `numpy`) to represent the data. Tons of very fast underlying code.
# In[24]:
x = np.arange(10000)
print x # smart printing
# In[25]:
print x[0] # first element
print x[-1] # last element
print x[0:5] # first 5 elements (also x[:5])
print x[:] # "Everything"
# In[26]:
print x[-5:] # last five elements
# In[27]:
print x[-5:-2]
# In[28]:
print x[-5:-1] # not final value -- not inclusive on right
# In[ ]:
# In[29]:
x = np.random.randint(5, 5000, (3, 5))
# In[30]:
x
# In[31]:
np.sum(x)
# In[32]:
x.sum()
# In[42]:
np.sum(x)
# In[41]:
np.sum(x, axis=0)
# In[43]:
np.sum(x, axis=1)
# In[44]:
x.sum(axis=1)
# In[45]:
# Multi dimension array slice with a comma
x[:, 2]
# In[ ]:
# In[46]:
y = np.linspace(10, 20, 11)
y
# In[47]:
get_ipython().magic(u'pinfo np.linspace')
# In[ ]:
# np.linspace()   # place the cursor inside the parentheses and press shift-tab for the signature
# shift-tab; shift-tab-tab
# np.              # press tab after the dot for attribute completion
# In[48]:
def does_it(first=x, second=y):
"""This is my doc"""
pass
# In[49]:
y[[3, 5, 7]]
# In[ ]:
does_it()
# In[51]:
num = 3000
x = np.linspace(1.0, 300.0, num)
y = np.random.rand(num)
z = np.sin(x)
np.savetxt("example.txt", np.transpose((x, y, z)))
# In[52]:
get_ipython().magic(u'less example.txt')
# In[53]:
get_ipython().system(u'wc example.txt')
# In[54]:
get_ipython().system(u'head example.txt')
# In[55]:
#Not a good idea
a = []
b = []
for line in open("example.txt", 'r'):
a.append(line[0])
b.append(line[2])
a[:10] # Whoops!
# In[56]:
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(line[0])
b.append(line[2])
a[:10] # Strings!
# In[57]:
a = []
b = []
for line in open("example.txt", 'r'):
line = line.split()
a.append(float(line[0]))
b.append(float(line[2]))
a[:10] # Lists!
# In[58]:
# Do this!
a, b = np.loadtxt("example.txt", unpack=True, usecols=(0,2))
# In[59]:
a
# ## Matplotlib and Numpy
#
# In[60]:
from numpy.random import randn
# In[61]:
num = 50
x = np.linspace(2.5, 300, num)
y = randn(num)
plt.scatter(x, y)
# In[64]:
y > 1
# In[65]:
y[y > 1]
# In[66]:
y[(y < 1) & (y > -1)]
# In[67]:
plt.scatter(x, y, c='b', s=50)
plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r', s=50)
# In[68]:
y[~((y < 1) & (y > -1))] = 1.0
plt.scatter(x, y, c='b')
plt.scatter(x, np.clip(y, -0.5, 0.5), color='red')
# In[71]:
num = 350
slope = 0.3
x = randn(num) * 50. + 150.0
y = randn(num) * 5 + x * slope
plt.scatter(x, y, c='b')
# In[72]:
# plt.scatter(x[(y < 1) & (y > -1)], y[(y < 1) & (y > -1)], c='r')
# np.argsort, np.sort, complicated index slicing
dframe = pd.DataFrame({'x': x, 'y': y})
g = sns.jointplot('x', 'y', data=dframe, kind="reg")
# ## Grab Python version of ggplot http://ggplot.yhathq.com/
# In[73]:
from ggplot import ggplot, aes, geom_line, stat_smooth, geom_dotplot, geom_point
# In[74]:
ggplot(aes(x='x', y='y'), data=dframe) + geom_point() + stat_smooth(colour='blue', span=0.2)
# In[ ]:
| mit |
efiring/scipy | doc/source/conf.py | 9 | 10898 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.1":
raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'scipyoptdoc']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-2014, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print "Scipy (VERSION %s)" % (version,)
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(scipy.__file__))
if 'dev' in scipy.__version__:
return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
fn, linespec)
else:
return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
scipy.__version__, fn, linespec)
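# Illustrative note (hypothetical example, derived from the code above): an
# object whose source file resolves to <scipy package dir>/cluster/hierarchy.py
# with findsource() reporting line index 100 would be linked to
# http://github.com/scipy/scipy/blob/v<release>/scipy/cluster/hierarchy.py#L101.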
| bsd-3-clause |
arabenjamin/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
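# (n_jobs=-1 would instead use all available cores; 1 keeps this run single-threaded)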
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
wzbozon/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
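# Each estimator is fit once on the upper-half pixels (X) and predicts all
# lower-half pixels (y) jointly, i.e. multi-output regression.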
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/io/stata.py | 2 | 77117 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
an once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://statsmodels.sourceforge.net/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. Note that Stata doesn't
support unicode. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>>     do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
return reader
return reader.read()
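# Illustrative sketch (not part of the original module): a minimal round trip
# through the writer/reader defined in this file, assuming a writable working
# directory and the usual top-level pandas entry points:
#
#   import pandas as pd
#   df = pd.DataFrame({'x': [1.0, 2.0], 'y': ['a', 'b']})
#   df.to_stata('example.dta')            # DataFrame.to_stata uses StataWriter below
#   back = pd.read_stata('example.dta')   # with write_index=True (the default) the
#                                         # saved index comes back as an 'index' column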
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th or ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
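            # e.g. year == 1961, month == 1 -> 100 * 1961 + 1 == 196101,
            # which to_datetime parses with format '%Y%m'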
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format='%Y') + to_timedelta(days, unit='d')
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for
y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000)) for
d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = np.nan
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
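        # Worked example (matches the docstring above): dates == 52 gives
        # year = 1960 + 52 // 52 = 1961 and days = (52 % 52) * 7 = 0,
        # i.e. 1961-01-01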
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th or ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if com.is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if com.is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("fmt %s not understood" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters.
Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only alphanumerics and
underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and
upcast if needed. int64 data is not usable in Stata, and so it is
    downcast to int32 whenever the values are in the int32 range, and
    cast to float64 when larger than this range. If the int64 values
    are outside of the range of those perfectly representable as float64 values,
    a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the same
    size if there is no loss in precision, otherwise they are upcast to a larger
    type. uint64 is currently not supported since it is converted to object in
a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
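# Illustrative sketch (not part of the original module) of the casts above:
#
#   df = DataFrame({'flag': np.array([True, False]),
#                   'big': np.array([0, 2 ** 40], dtype=np.int64)})
#   out = _cast_to_stata_types(df)
#   # 'flag' becomes int8 (bool -> int8); 'big' exceeds the int32 range and is
#   # cast to float64 (no precision warning since 2 ** 40 < 2 ** 53)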
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
    catarray : Categorical
        Categorical Series whose categories become the Stata value labels
    Attributes
    ----------
    labname : str
        Name of the value label, taken from the column name
    value_labels : list of (int, str) tuples
        Sorted (code, label) pairs built from the category codes and categories
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
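    # e.g. for int8 (b == 101): 101 -> '.', 102 -> '.a', ..., 127 -> '.z'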
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
#type code.
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
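        # so TYPE_MAP[251] == 'b', TYPE_MAP[252] == 'h', ..., TYPE_MAP[255] == 'd',
        # while entries 0-250 map to themselves (1-244 are fixed-width str lengths)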
self.TYPE_MAP_XML = \
dict(
[
(32768, 'L'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
#NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = \
{
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = \
{
'i': 252,
'f': 254,
'b': 251
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = \
{
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = \
{
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'L': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
def _decode_bytes(self, str, errors=None):
if compat.PY3 or self._encoding is not None:
return str.decode(self._encoding, errors)
else:
return str
class StataReader(StataParser):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
# format 117 or higher (XML like)
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117]:
raise ValueError("Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)")
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
self.path_or_buf.read(11) # </N><label>
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
self.data_label = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(19) # </label><timestamp>
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
self.time_stamp = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
self.path_or_buf.read(8) # <variable_lables>, throw away
seek_variable_labels = seek_value_label_names + (33*self.nvar) + 20 + 17
            # Below is the original, correct code (per the Stata dta format doc,
# although this is not followed in actual 117 dtas)
#seek_variable_labels = struct.unpack(
# self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
#self.path_or_buf.read(8) # </stata_dta>
#self.path_or_buf.read(8) # EOF
self.path_or_buf.seek(seek_vartypes)
typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
self.typlist = [None]*self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.typlist[i] = typ
#elif typ == 32768:
# raise ValueError("Long strings are not supported")
else:
self.typlist[i] = self.TYPE_MAP_XML[typ]
i += 1
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(typlist)))
self.dtyplist = [None]*self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.dtyplist[i] = str(typ)
else:
self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
i += 1
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(typlist)))
self.path_or_buf.seek(seek_varnames)
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(seek_formats)
self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_value_label_names)
self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_variable_labels)
self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
# header
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 113, 114, 115]:
raise ValueError("Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)")
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
if self.format_version > 105:
self.data_label = self._null_terminate(self.path_or_buf.read(81))
else:
self.data_label = self._null_terminate(self.path_or_buf.read(32))
if self.format_version > 104:
self.time_stamp = self._null_terminate(self.path_or_buf.read(18))
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
typlist = [
self.OLD_TYPE_MAPPING[
self._decode_bytes(self.path_or_buf.read(1))
] for i in range(self.nvar)
]
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
if self.format_version > 113:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)]
elif self.format_version > 104:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(12))
for i in range(self.nvar)]
else:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(7))
for i in range(self.nvar)]
if self.format_version > 108:
self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.lbllist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
if self.format_version > 105:
self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
self.vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x for x in self.fmtlist]
def _calcsize(self, fmt):
return (type(fmt) is int and fmt
or struct.calcsize(self.byteorder + fmt))
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None: # have bytes not strings,
# so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of variable label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of variable label table (format < 117)
labname = self._null_terminate(self.path_or_buf.read(33))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = []
for i in range(n):
off.append(struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0])
val = []
for i in range(n):
val.append(struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0])
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:])
)
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = dict()
while True:
if self.path_or_buf.read(3) != b'GSO':
break
v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
va = va[0:-1].decode(self._encoding or self._default_encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __iter__(self):
try:
if self._chunksize:
while True:
yield self.read(self._chunksize)
else:
yield self.read()
except StopIteration:
pass
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
self._read_value_labels()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data)==0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
data = self._do_select_columns(data, columns)
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if (dtype != np.dtype(object)) and (dtype != self.dtyplist[i]):
requires_type_conversion = True
data_formatted.append((col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
data[col] = _stata_elapsed_date_to_datetime_vec(data[col], self.fmtlist[i])
if convert_categoricals and self.value_label_dict:
data = self._do_convert_categoricals(data, self.value_label_dict, self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'L':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
matched = set()
for i, col in enumerate(data.columns):
if col in column_set:
matched.update([col])
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist, order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
cat_data.categories = categories
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self.vlblist))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
#if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` chars long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError(
"convert_dates key is not in varlist and is not an int"
)
new_dict.update({key: convert_dates[key]})
return new_dict
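# e.g. _maybe_convert_to_int_keys({'date': 'tw'}, ['x', 'date']) -> {1: '%tw'}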
def _dtype_to_stata_type(dtype, column):
"""
    Converts numpy dtypes to Stata type codes, returned as the chr() of the ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(column.values)
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode')
or len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(column.values)
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : file path or buffer
Where to save the dta file.
data : array-like
Array-like input to save. Pandas objects are also accepted.
convert_dates : dict
Dictionary mapping column of datetime types to the stata internal
format that you want to use for the dates. Options are
'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
number or a name.
encoding : str
Default is latin-1. Note that Stata does not support unicode.
byteorder : str
Can be ">", "<", "little", or "big". The default is None which uses
`sys.byteorder`
time_stamp : datetime
A date time to use when writing the file. Can be None, in which
case the current time is used.
dataset_label : str
A label for the data set. Should be 80 characters or smaller.
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._file = _open_file_binary_write(
fname, self._encoding or self._default_encoding
)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorigal columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [com.is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export int64-based '
'categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if dates
        are exported, the variable name is propagated to the date conversion
dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
                # need to possibly encode the orig name if it's unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append('{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
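# Editor's illustrative trace (not part of the original pandas source) of the
# renaming rules implemented above, for a hypothetical column named "1 x":
#     "1 x"  -> "1_x"    illegal character replaced by '_'
#     "1_x"  -> "_1_x"   names may not start with a digit
#     "_1_x" -> truncated to at most 32 characters
# If the converted name still collides with another column, an ascending
# counter is prepended (e.g. "_0" + name) until it no longer collides, and
# every rename is reported through an InvalidColumnName warning.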
def _prepare_pandas(self, data):
#NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
if self._convert_dates is not None:
self._convert_dates = _maybe_convert_to_int_keys(
self._convert_dates, self.varlist
)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
self._file.close()
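# Editor's summary of the layout written by write_file above (not part of the
# original pandas source). The pieces are emitted in this order:
#     header (dta format 114, byteorder, nvar, nobs, data label, time stamp)
#     descriptors (typlist, varlist, srtlist, fmtlist, lbllist)
#     variable labels, then 5 zero bytes for the expansion fields
#     the data records, then the value-label tables for categorical columns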
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder+"h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder+"i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", (2*(nvar+1)))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self, labels=None):
nvar = self.nvar
if labels is None:
for i in range(nvar):
self._write(_pad_bytes("", 81))
def _prepare_data(self):
data = self.data.copy()
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c'+str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c'+str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
| gpl-2.0 |
pyparallel/numpy | numpy/lib/function_base.py | 4 | 143343 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def _hist_optim_numbins_estimator(a, estimator):
"""
A helper function to be called from histogram to deal with estimating optimal number of bins
estimator: str
If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function
will choose the appropriate estimator and return its estimate for the optimal
number of bins.
"""
assert isinstance(estimator, basestring)
# private function should not be called otherwise
if a.size == 0:
return 1
def sturges(x):
"""
Sturges Estimator
A very simplistic estimator based on the assumption of normality of the data.
Poor performance for non-normal data, especially obvious for large X.
Depends only on size of the data.
"""
return np.ceil(np.log2(x.size)) + 1
def rice(x):
"""
Rice Estimator
Another simple estimator, with no normality assumption.
It has better performance for large data, but tends to overestimate number of bins.
The number of bins is proportional to the cube root of data size (asymptotically optimal)
Depends only on size of the data
"""
return np.ceil(2 * x.size ** (1.0 / 3))
def scott(x):
"""
Scott Estimator
The binwidth is proportional to the standard deviation of the data and
inversely proportional to the cube root of data size (asymptotically optimal)
"""
h = 3.5 * x.std() * x.size ** (-1.0 / 3)
if h > 0:
return np.ceil(x.ptp() / h)
return 1
def fd(x):
"""
Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth
Considered a variation of the Scott rule with more robustness as the IQR
is less affected by outliers than the standard deviation. However the IQR depends on
fewer points than the sd so it is less accurate, especially for long tailed distributions.
If the IQR is 0, we return 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size (asymptotically optimal)
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
if iqr > 0:
h = (2 * iqr * x.size ** (-1.0 / 3))
return np.ceil(x.ptp() / h)
# If iqr is 0, default number of bins is 1
return 1
def auto(x):
"""
The FD estimator is usually the most robust method, but it tends to be too small
for small X. The Sturges estimator is quite good for small (<1000) datasets and is
the default in R.
This method gives good off the shelf behaviour.
"""
return max(fd(x), sturges(x))
optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott,
'fd': fd, 'auto': auto}
try:
estimator_func = optimal_numbins_methods[estimator.lower()]
except KeyError:
raise ValueError("{0} not a valid method for `bins`".format(estimator))
else:
# these methods return floats, np.histogram requires an int
return int(estimator_func(a))
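# Editor's worked numbers for the estimators above (not part of the original
# numpy source), for a sample of size 1000:
#     sturges: ceil(log2(1000)) + 1     -> 11 bins
#     rice:    ceil(2 * 1000 ** (1/3))  -> 20 bins
# scott and fd additionally depend on the spread (std / IQR) of the data, and
# 'auto' simply takes max(fd(x), sturges(x)).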
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use the method
chosen to calculate the optimal number of bins (see Notes for more detail
on the estimators). For visualisation, we suggest using the 'auto' option.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into account data
variability and data size.
'scott'
Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data size.
Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only optimal for
gaussian data and underestimates number of bins for large non-gaussian datasets.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well found in literature,
and are inspired by the choices R provides for histogram visualisation.
Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal,
which is why it appears in most estimators.
These are simply plug-in methods that give good starting points for number of bins.
In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the sturges
value will usually be chosen, while larger datasets will usually default to FD.
Avoids the overly conservative behaviour of FD and Sturges for small and
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The sd is not very robust to outliers. Values
are very similar to the Freedman Diaconis Estimator in the absence of outliers.
'Rice'
.. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
The number of bins is only proportional to cube root of a.size.
It tends to overestimate the number of bins
and it does not take into account data variability.
'Sturges'
.. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
The number of bins is the base2 log of a.size.
This estimator assumes normality of data and is too conservative for larger,
non-normal datasets. This is the default method in R's `hist` method.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data with 2000 points
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
>>> plt.hist(a, bins = 'auto') # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if isinstance(bins, basestring):
bins = _hist_optim_numbins_estimator(a, bins)
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
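# Editor's illustration (not part of the original numpy source) of the
# half-open bin convention documented in the Notes above: only the rightmost
# bin includes its right edge.
#     >>> np.histogram([1, 2, 3, 4], bins=[1, 2, 3, 4])[0]
#     array([1, 1, 2])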
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
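# Editor's note on the implementation above (not part of the original numpy
# source): every dimension gets two extra "outlier" bins (nbin[i] = bins[i] + 2),
# samples are digitized into that padded grid, the counts are accumulated in
# the flattened matrix through the mixed-radix index `xy`, and the padding is
# stripped again at the end by `core = D*[slice(1, -1)]`.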
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
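# Editor's illustration (not part of the original numpy source) of the
# weighted formula used above, avg = sum(a * wgt) / sum(wgt):
#     >>> np.average([1, 2, 3], weights=[3, 1, 1])
#     1.6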
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create and array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
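# Editor's illustration (not part of the original numpy source) of the
# "one extra function is the default" rule described in the docstring above:
#     >>> x = np.array([-2., 0., 2.])
#     >>> np.piecewise(x, [x < 0], [-1, 1])   # one condition, two functions
#     array([-1.,  1.,  1.])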
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
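# Editor's illustration (not part of the original numpy source): when several
# conditions hold, the earliest entry in condlist wins -- the loop above
# copies the choices in reverse order, so the first one is written last.
#     >>> x = np.arange(6)
#     >>> np.select([x < 4, x < 2], [10, 20], default=-1)
#     array([10, 10, 10, 10, -1, -1])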
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
A single scalar specifies the sample distance for all dimensions.
If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes.
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
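# Editor's summary of the difference stencils used above (not part of the
# original numpy source), written for the 1-D case with spacing dx:
#     interior:            out[i]  = (y[i+1] - y[i-1]) / (2*dx)
#     edges, edge_order=1: out[0]  = (y[1] - y[0]) / dx
#                          out[-1] = (y[-1] - y[-2]) / dx
#     edges, edge_order=2: out[0]  = -(3*y[0] - 4*y[1] + y[2]) / (2*dx)
#                          out[-1] =  (3*y[-1] - 4*y[-2] + y[-3]) / (2*dx)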
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
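# Editor's note on the periodic branch above (not part of the original numpy
# source): x and xp are reduced modulo `period`, xp/fp are sorted by the
# normalized xp, and one point is duplicated on each side (xp[-1] - period and
# xp[0] + period) so that queries near the seam interpolate across the
# boundary instead of falling back to the left/right fill values.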
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
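# Editor's note on the implementation above (not part of the original numpy
# source): `ddmod = mod(dd + pi, 2*pi) - pi` folds every phase step into
# [-pi, pi) (keeping +pi for positive steps), `ph_correct = ddmod - dd` is the
# multiple of 2*pi removed from each step, it is zeroed where |dd| < discont,
# and its cumulative sum is added back onto p[1:].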
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
    further degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
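# A hedged usage sketch of the multiple-output path handled by
# ``_get_ufunc_and_otypes`` above: when the wrapped function returns a tuple,
# ``nout`` is inferred from the first call and a tuple of arrays is returned.
#
#   >>> def _py_divmod(a, b):
#   ...     return a // b, a % b
#   >>> vdm = np.vectorize(_py_divmod)
#   >>> q, r = vdm([7, 8, 9], 3)
#   >>> q
#   array([2, 2, 3])
#   >>> r
#   array([1, 2, 0])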
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
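# A small sketch of the frequency-weight path above (assumes NumPy >= 1.10,
# where ``fweights`` was added): integer frequency weights behave like
# repeating the corresponding observation columns.
#
#   >>> x = np.array([[0., 1., 2.], [2., 1., 0.]])
#   >>> x_rep = np.array([[0., 1., 1., 2.], [2., 1., 1., 0.]])
#   >>> np.allclose(np.cov(x, fweights=[1, 2, 1]), np.cov(x_rep))
#   True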
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
d = sqrt(d)
# calculate "c / multiply.outer(d, d)" row-wise ... for memory and speed
for i in range(0, d.size):
c[i,:] /= (d * d[i])
return c
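# Quick sanity sketch of the normalization performed above: dividing each row
# and column of the covariance matrix by the standard deviations puts ones on
# the diagonal and keeps the matrix symmetric.
#
#   >>> x = [-2.1, -1., 4.3]
#   >>> y = [3., 1.1, 0.12]
#   >>> r = np.corrcoef(x, y)
#   >>> np.allclose(np.diag(r), 1.0)
#   True
#   >>> np.allclose(r, r.T)
#   True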
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
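# Hedged sanity checks for the Chebyshev-based evaluation above: ``i0`` is even
# in its argument (negative inputs are negated before the series is evaluated)
# and I_0(0) equals 1.
#
#   >>> np.allclose(np.i0(-3.5), np.i0(3.5))
#   True
#   >>> np.allclose(np.i0(0.0), 1.0)
#   True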
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
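# A short sketch of the effect of ``beta``, matching the table in the docstring
# above: ``beta = 0`` reduces the Bessel-function taper to a rectangular
# (all-ones) window.
#
#   >>> np.allclose(np.kaiser(8, 0.0), np.ones(8))
#   True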
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
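# Minimal check of the zero-substitution trick above: exact zeros are replaced
# by a tiny value, so the evaluation stays finite and yields the limiting value
# sinc(0) == 1, while nonzero points follow sin(pi*x)/(pi*x).
#
#   >>> np.allclose(np.sinc([0.0, 0.5]), [1.0, 2.0 / np.pi])
#   True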
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
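# Hedged sketch of what ``_ureduce`` enables for its callers: reductions such
# as ``median`` accept a tuple of axes by collapsing them into one trailing
# axis before the reduction runs.
#
#   >>> a = np.arange(24).reshape(2, 3, 4)
#   >>> np.median(a, axis=(0, 2)).shape
#   (3,)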
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # cannot reasonably be implemented in terms of percentile, as we have to
    # call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
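# Small consistency sketch for the partition-based median above: for an
# odd-length 1-D array it selects the same element a full sort would.
#
#   >>> a = np.array([3., 1., 4., 1., 5., 9., 2.])
#   >>> np.median(a) == np.sort(a)[a.size // 2]
#   True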
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
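# Hedged illustration of the ``interpolation`` choices documented above, for a
# rank that falls exactly halfway between two data points:
#
#   >>> a = np.array([1, 2, 3, 4])
#   >>> np.percentile(a, 50) == 2.5                    # 'linear' (default)
#   True
#   >>> np.percentile(a, 50, interpolation='lower') == 2
#   True
#   >>> np.percentile(a, 50, interpolation='higher') == 3
#   True
#   >>> np.percentile(a, 50, interpolation='midpoint') == 2.5
#   True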
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower', 'higher', "
            "'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
    This routine cannot modify read-only docstrings, such as those
    of new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
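# Hedged usage sketch for the helper above; the target module and attribute
# names here are purely illustrative:
#
#   add_newdoc('some.extension.module', 'some_builtin',
#              "Replacement docstring for an object defined in C.")
#
# Any failure (missing attribute, read-only docstring) is silently swallowed,
# so callers must verify afterwards that the docstring actually changed.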
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
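# A quick sketch of the two indexing conventions handled above: for inputs of
# length 3 and 2, 'xy' (the default) yields (2, 3)-shaped grids while 'ij'
# yields (3, 2)-shaped grids.
#
#   >>> x, y = np.arange(3), np.arange(2)
#   >>> np.meshgrid(x, y)[0].shape
#   (2, 3)
#   >>> np.meshgrid(x, y, indexing='ij')[0].shape
#   (3, 2)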
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
    # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
# it works likes the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| bsd-3-clause |
pp-mo/iris | lib/iris/plot.py | 2 | 55360 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Iris-specific extensions to matplotlib, mimicking the :mod:`matplotlib.pyplot`
interface.
See also: :ref:`matplotlib <matplotlib:users-guide-index>`.
"""
import collections
import datetime
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
from cartopy.geodesic import Geodesic
import cftime
import matplotlib.axes
import matplotlib.collections as mpl_collections
import matplotlib.dates as mpl_dates
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
import matplotlib.ticker as mpl_ticker
import matplotlib.transforms as mpl_transforms
import numpy as np
import numpy.ma as ma
import iris.cube
import iris.analysis.cartography as cartography
import iris.coords
import iris.coord_systems
from iris.exceptions import IrisError
# Importing iris.palette to register the brewer palettes.
import iris.palette
from iris.util import _meshgrid
# Cynthia Brewer citation text.
BREWER_CITE = "Colours based on ColorBrewer.org"
PlotDefn = collections.namedtuple("PlotDefn", ("coords", "transpose"))
def _get_plot_defn_custom_coords_picked(cube, coords, mode, ndims=2):
def names(coords):
result = []
for coord in coords:
if isinstance(coord, int):
result.append("dim={}".format(coord))
else:
result.append(coord.name())
return ", ".join(result)
def as_coord(coord):
if isinstance(coord, int):
# Pass through valid dimension indexes.
if coord >= ndims:
emsg = (
"The data dimension ({}) is out of range for "
"the dimensionality of the required plot ({})"
)
raise IndexError(emsg.format(coord, ndims))
else:
coord = cube.coord(coord)
return coord
coords = list(map(as_coord, coords))
# Check that we were given the right number of coordinates/dimensions.
if len(coords) != ndims:
raise ValueError(
"The list of coordinates given (%s) should have the"
" same length (%s) as the dimensionality of the"
" required plot (%s)" % (names(coords), len(coords), ndims)
)
# Check which dimensions are spanned by each coordinate.
def get_span(coord):
if isinstance(coord, int):
span = set([coord])
else:
span = set(cube.coord_dims(coord))
return span
spans = list(map(get_span, coords))
for span, coord in zip(spans, coords):
if not span:
msg = "The coordinate {!r} doesn't span a data dimension."
raise ValueError(msg.format(coord.name()))
if mode == iris.coords.BOUND_MODE and len(span) not in [1, 2]:
raise ValueError(
"The coordinate {!r} has {} dimensions."
"Cell-based plotting is only supported for"
"coordinates with one or two dimensions.".format(
coord.name(), len(span)
)
)
# Check the combination of coordinates spans enough (ndims) data
# dimensions.
total_span = set().union(*spans)
if len(total_span) != ndims:
raise ValueError(
"The given coordinates ({}) don't span the {} data"
" dimensions.".format(names(coords), ndims)
)
# If we have 2-dimensional data, and one or more 1-dimensional
# coordinates, check if we need to transpose.
transpose = False
if ndims == 2 and min(map(len, spans)) == 1:
for i, span in enumerate(spans):
if len(span) == 1:
if list(span)[0] == i:
transpose = True
break
# Note the use of `reversed` to convert from the X-then-Y
# convention of the end-user API to the V-then-U convention used by
# the plotting routines.
plot_coords = list(reversed(coords))
return PlotDefn(plot_coords, transpose)
def _valid_bound_dim_coord(coord):
result = None
if coord and coord.ndim == 1 and coord.nbounds:
result = coord
return result
def _get_plot_defn(cube, mode, ndims=2):
"""
Return data and plot-axis coords given a cube & a mode of either
POINT_MODE or BOUND_MODE.
"""
if cube.ndim != ndims:
msg = "Cube must be %s-dimensional. Got %s dimensions."
raise ValueError(msg % (ndims, cube.ndim))
# Start by taking the DimCoords from each dimension.
coords = [None] * ndims
for dim_coord in cube.dim_coords:
dim = cube.coord_dims(dim_coord)[0]
coords[dim] = dim_coord
# When appropriate, restrict to 1D with bounds.
if mode == iris.coords.BOUND_MODE:
coords = list(map(_valid_bound_dim_coord, coords))
def guess_axis(coord):
axis = None
if coord is not None:
axis = iris.util.guess_coord_axis(coord)
return axis
# Allow DimCoords in aux_coords to fill in for missing dim_coords.
for dim, coord in enumerate(coords):
if coord is None:
aux_coords = cube.coords(dimensions=dim)
aux_coords = [
coord
for coord in aux_coords
if isinstance(coord, iris.coords.DimCoord)
]
if aux_coords:
aux_coords.sort(key=lambda coord: coord._as_defn())
coords[dim] = aux_coords[0]
# If plotting a 2 dimensional plot, check for 2d coordinates
if ndims == 2:
missing_dims = [
dim for dim, coord in enumerate(coords) if coord is None
]
if missing_dims:
# Note that this only picks up coordinates that span the dims
two_dim_coords = cube.coords(dimensions=missing_dims)
two_dim_coords = [
coord for coord in two_dim_coords if coord.ndim == 2
]
if len(two_dim_coords) >= 2:
two_dim_coords.sort(key=lambda coord: coord._as_defn())
coords = two_dim_coords[:2]
if mode == iris.coords.POINT_MODE:
# Allow multi-dimensional aux_coords to override the dim_coords
# along the Z axis. This results in a preference for using the
# derived altitude over model_level_number or level_height.
# Limit to Z axis to avoid preferring latitude over grid_latitude etc.
axes = list(map(guess_axis, coords))
axis = "Z"
if axis in axes:
for coord in cube.coords(dim_coords=False):
if (
max(coord.shape) > 1
and iris.util.guess_coord_axis(coord) == axis
):
coords[axes.index(axis)] = coord
# Re-order the coordinates to achieve the preferred
# horizontal/vertical associations. If we can't associate
# an axis to order the coordinates, fall back to using the cube dimension
# followed by the name of the coordinate.
def sort_key(coord):
order = {"X": 2, "T": 1, "Y": -1, "Z": -2}
axis = guess_axis(coord)
return (
order.get(axis, 0),
coords.index(coord),
coord and coord.name(),
)
sorted_coords = sorted(coords, key=sort_key)
transpose = sorted_coords != coords
return PlotDefn(sorted_coords, transpose)
def _can_draw_map(coords):
std_names = [
c and c.standard_name
for c in coords
if isinstance(c, iris.coords.Coord)
]
valid_std_names = [
["latitude", "longitude"],
["grid_latitude", "grid_longitude"],
["projection_y_coordinate", "projection_x_coordinate"],
]
return std_names in valid_std_names
def _broadcast_2d(u, v):
# Matplotlib needs the U and V coordinates to have the same
# dimensionality (either both 1D, or both 2D). So we simply
# broadcast both to 2D to be on the safe side.
u = np.atleast_2d(u)
v = np.atleast_2d(v.T).T
u, v = np.broadcast_arrays(u, v)
return u, v
def _string_coord_axis_tick_labels(string_axes, axes=None):
"""Apply tick labels for string coordinates."""
ax = axes if axes else plt.gca()
for axis, ticks in string_axes.items():
formatter = mpl_ticker.IndexFormatter(ticks)
locator = mpl_ticker.MaxNLocator(integer=True)
this_axis = getattr(ax, axis)
this_axis.set_major_formatter(formatter)
this_axis.set_major_locator(locator)
def _invert_yaxis(v_coord, axes=None):
"""
Inverts the y-axis of the current plot based on conditions:
* If the y-axis is already inverted we don't want to re-invert it.
* If v_coord is None then it will not have any attributes.
* If neither of the above are true then invert y if v_coord has
attribute 'positive' set to 'down'.
Args:
* v_coord - the coord to be plotted on the y-axis
"""
axes = axes if axes else plt.gca()
yaxis_is_inverted = axes.yaxis_inverted()
if not yaxis_is_inverted and isinstance(v_coord, iris.coords.Coord):
attr_pve = v_coord.attributes.get("positive")
if attr_pve is not None and attr_pve.lower() == "down":
axes.invert_yaxis()
def _check_bounds_contiguity_and_mask(coord, data, atol=None, rtol=None):
"""
Checks that any discontiguities in the bounds of the given coordinate only
occur where the data is masked.
Where a discontinuity occurs the grid created for plotting will not be
correct. This does not matter if the data is masked in that location as
this is not plotted.
If a discontiguity occurs where the data is *not* masked, an error is
raised.
Args:
coord: (iris.coord.Coord)
Coordinate the bounds of which will be checked for contiguity
data: (array)
Data of the the cube we are plotting
atol:
Absolute tolerance when checking the contiguity. Defaults to None.
If an absolute tolerance is not set, 1D coords are not checked (so
as to not introduce a breaking change without a major release) but
2D coords are always checked, by calling
:meth:`iris.coords.Coord._discontiguity_in_bounds` with its default
tolerance.
"""
kwargs = {}
data_is_masked = hasattr(data, "mask")
if data_is_masked:
# When checking the location of the discontiguities, we check against
# the opposite of the mask, which is True where data exists.
mask_invert = np.logical_not(data.mask)
if coord.ndim == 1:
# 1D coords are only checked if an absolute tolerance is set, to avoid
# introducing a breaking change.
if atol:
contiguous, diffs = coord._discontiguity_in_bounds(atol=atol)
if not contiguous and data_is_masked:
not_masked_at_discontiguity = np.any(
np.logical_and(mask_invert[:-1], diffs)
)
else:
return
elif coord.ndim == 2:
if atol:
kwargs["atol"] = atol
if rtol:
kwargs["rtol"] = rtol
contiguous, diffs = coord._discontiguity_in_bounds(**kwargs)
if not contiguous and data_is_masked:
diffs_along_x, diffs_along_y = diffs
# Check along both dimensions that any discontiguous
# points are correctly masked.
not_masked_at_discontiguity_along_x = np.any(
np.logical_and(mask_invert[:, :-1], diffs_along_x)
)
not_masked_at_discontiguity_along_y = np.any(
np.logical_and(mask_invert[:-1,], diffs_along_y)
)
not_masked_at_discontiguity = (
not_masked_at_discontiguity_along_x
or not_masked_at_discontiguity_along_y
)
# If any discontiguity occurs where the data is not masked the grid will be
# created incorrectly, so raise an error.
if not contiguous:
if not data_is_masked:
raise ValueError(
"The bounds of the {} coordinate are not "
"contiguous. Not able to create a suitable grid"
"to plot. You can use "
"iris.util.find_discontiguities() to identify "
"discontiguities in your x and y coordinate "
"bounds arrays.".format(coord.name())
)
if not_masked_at_discontiguity:
raise ValueError(
"The bounds of the {} coordinate are not "
"contiguous and data is not masked where the "
"discontiguity occurs. Not able to create a "
"suitable grid to plot. You can use "
"iris.util.find_discontiguities() to identify "
"discontiguities in your x and y coordinate "
"bounds arrays, and then mask them with "
"iris.util.mask_cube()"
"".format(coord.name())
)
def _draw_2d_from_bounds(draw_method_name, cube, *args, **kwargs):
# NB. In the interests of clarity we use "u" and "v" to refer to the
# horizontal and vertical axes on the matplotlib plot.
mode = iris.coords.BOUND_MODE
# Get & remove the coords entry from kwargs.
coords = kwargs.pop("coords", None)
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(
cube, coords, mode, ndims=2
)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
contig_tol = kwargs.pop("contiguity_tolerance", None)
for coord in plot_defn.coords:
if hasattr(coord, "has_bounds") and coord.has_bounds():
_check_bounds_contiguity_and_mask(
coord, data=cube.data, atol=contig_tol
)
if _can_draw_map(plot_defn.coords):
result = _map_common(
draw_method_name,
None,
iris.coords.BOUND_MODE,
cube,
plot_defn,
*args,
**kwargs,
)
else:
# Obtain data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# Obtain U and V coordinates
v_coord, u_coord = plot_defn.coords
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for coord, axis_name, data_dim in zip(
[u_coord, v_coord], ["xaxis", "yaxis"], [1, 0]
):
if coord is None:
values = np.arange(data.shape[data_dim] + 1)
elif isinstance(coord, int):
dim = 1 - coord if plot_defn.transpose else coord
values = np.arange(data.shape[dim] + 1)
else:
if coord.points.dtype.char in "SU":
if coord.points.ndim != 1:
msg = "Coord {!r} must be one-dimensional."
raise ValueError(msg.format(coord))
if coord.bounds is not None:
msg = "Cannot plot bounded string coordinate."
raise ValueError(msg)
string_axes[axis_name] = coord.points
values = np.arange(data.shape[data_dim] + 1) - 0.5
else:
values = coord.contiguous_bounds()
plot_arrays.append(values)
u, v = plot_arrays
# If the data is transposed, 2D coordinates will also need to be
# transposed.
if plot_defn.transpose is True:
u, v = [coord.T if coord.ndim == 2 else coord for coord in [u, v]]
if u.ndim == v.ndim == 1:
u, v = _broadcast_2d(u, v)
axes = kwargs.pop("axes", None)
draw_method = getattr(axes if axes else plt, draw_method_name)
result = draw_method(u, v, data, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes, axes)
# Invert y-axis if necessary.
_invert_yaxis(v_coord, axes)
return result
def _draw_2d_from_points(draw_method_name, arg_func, cube, *args, **kwargs):
# NB. In the interests of clarity we use "u" and "v" to refer to the
# horizontal and vertical axes on the matplotlib plot.
mode = iris.coords.POINT_MODE
# Get & remove the coords entry from kwargs.
coords = kwargs.pop("coords", None)
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(cube, coords, mode)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
if _can_draw_map(plot_defn.coords):
result = _map_common(
draw_method_name,
arg_func,
iris.coords.POINT_MODE,
cube,
plot_defn,
*args,
**kwargs,
)
else:
# Obtain data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# Also transpose the scatter marker color array,
# as now mpl 2.x does not do this for free.
if draw_method_name == "scatter" and "c" in kwargs:
c = kwargs["c"]
if hasattr(c, "T") and cube.data.shape == c.shape:
kwargs["c"] = c.T
# Obtain U and V coordinates
v_coord, u_coord = plot_defn.coords
if u_coord is None:
u = np.arange(data.shape[1])
elif isinstance(u_coord, int):
dim = 1 - u_coord if plot_defn.transpose else u_coord
u = np.arange(data.shape[dim])
else:
u = u_coord.points
u = _fixup_dates(u_coord, u)
if v_coord is None:
v = np.arange(data.shape[0])
elif isinstance(v_coord, int):
dim = 1 - v_coord if plot_defn.transpose else v_coord
v = np.arange(data.shape[dim])
else:
v = v_coord.points
v = _fixup_dates(v_coord, v)
if plot_defn.transpose:
u = u.T
v = v.T
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for values, axis_name in zip([u, v], ["xaxis", "yaxis"]):
# Replace any string coordinates with "index" coordinates.
if values.dtype.char in "SU":
if values.ndim != 1:
raise ValueError(
"Multi-dimensional string coordinates "
"not supported."
)
plot_arrays.append(np.arange(values.size))
string_axes[axis_name] = values
elif values.dtype == np.dtype(object) and isinstance(
values[0], datetime.datetime
):
plot_arrays.append(mpl_dates.date2num(values))
else:
plot_arrays.append(values)
u, v = plot_arrays
u, v = _broadcast_2d(u, v)
axes = kwargs.pop("axes", None)
draw_method = getattr(axes if axes else plt, draw_method_name)
if arg_func is not None:
args, kwargs = arg_func(u, v, data, *args, **kwargs)
result = draw_method(*args, **kwargs)
else:
result = draw_method(u, v, data, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes, axes)
# Invert y-axis if necessary.
_invert_yaxis(v_coord, axes)
return result
def _fixup_dates(coord, values):
if coord.units.calendar is not None and values.ndim == 1:
# Convert coordinate values into tuples of
# (year, month, day, hour, min, sec)
dates = [coord.units.num2date(val).timetuple()[0:6] for val in values]
if coord.units.calendar == "gregorian":
r = [datetime.datetime(*date) for date in dates]
else:
try:
import nc_time_axis
except ImportError:
msg = (
"Cannot plot against time in a non-gregorian "
'calendar, because "nc_time_axis" is not available : '
"Install the package from "
"https://github.com/SciTools/nc-time-axis to enable "
"this usage."
)
raise IrisError(msg)
r = [
nc_time_axis.CalendarDateTime(
cftime.datetime(*date), coord.units.calendar
)
for date in dates
]
values = np.empty(len(r), dtype=object)
values[:] = r
return values
def _data_from_coord_or_cube(c):
if isinstance(c, iris.cube.Cube):
data = c.data
elif isinstance(c, iris.coords.Coord):
data = _fixup_dates(c, c.points)
else:
raise TypeError("Plot arguments must be cubes or coordinates.")
return data
def _uv_from_u_object_v_object(u_object, v_object):
ndim_msg = "Cube or coordinate must be 1-dimensional. Got {} dimensions."
if u_object is not None and u_object.ndim > 1:
raise ValueError(ndim_msg.format(u_object.ndim))
if v_object.ndim > 1:
raise ValueError(ndim_msg.format(v_object.ndim))
v = _data_from_coord_or_cube(v_object)
if u_object is None:
u = np.arange(v.shape[0])
else:
u = _data_from_coord_or_cube(u_object)
return u, v
def _u_object_from_v_object(v_object):
u_object = None
if isinstance(v_object, iris.cube.Cube):
plot_defn = _get_plot_defn(v_object, iris.coords.POINT_MODE, ndims=1)
(u_object,) = plot_defn.coords
return u_object
def _get_plot_objects(args):
if len(args) > 1 and isinstance(
args[1], (iris.cube.Cube, iris.coords.Coord)
):
# two arguments
u_object, v_object = args[:2]
u, v = _uv_from_u_object_v_object(u_object, v_object)
args = args[2:]
if len(u) != len(v):
msg = (
"The x and y-axis objects are not compatible. They should "
"have equal sizes but got ({}: {}) and ({}: {})."
)
raise ValueError(
msg.format(u_object.name(), len(u), v_object.name(), len(v))
)
else:
# single argument
v_object = args[0]
u_object = _u_object_from_v_object(v_object)
u, v = _uv_from_u_object_v_object(u_object, args[0])
args = args[1:]
return u_object, v_object, u, v, args
def _get_geodesic_params(globe):
# Derive the semimajor axis and flattening values for a given globe from
# its attributes. If the values are under specified, raise a ValueError
flattening = globe.flattening
semimajor = globe.semimajor_axis
try:
if semimajor is None:
# Has semiminor or raises error
if flattening is None:
# Has inverse flattening or raises error
flattening = 1.0 / globe.inverse_flattening
semimajor = globe.semiminor_axis / (1.0 - flattening)
elif flattening is None:
if globe.semiminor_axis is not None:
flattening = (semimajor - globe.semiminor_axis) / float(
semimajor
)
else:
# Has inverse flattening or raises error
flattening = 1.0 / globe.inverse_flattening
except TypeError:
# One of the required attributes was None
raise ValueError("The globe was underspecified.")
return semimajor, flattening
def _shift_plot_sections(u_object, u, v):
"""
Shifts subsections of u by multiples of 360 degrees within ranges
defined by the points where the line should cross over the 0/360 degree
longitude boundary.
e.g. [ 300, 100, 200, 300, 100, 300 ] => [ 300, 460, 560, 660, 820, 660 ]
"""
# Convert coordinates to true lat-lon
src_crs = (
u_object.coord_system.as_cartopy_crs()
if u_object.coord_system is not None
else ccrs.Geodetic()
)
tgt_crs = ccrs.Geodetic(globe=src_crs.globe)
tgt_proj = ccrs.PlateCarree(globe=src_crs.globe)
points = tgt_crs.transform_points(src_crs, u, v)
startpoints = points[:-1, :2]
endpoints = points[1:, :2]
proj_x, proj_y, _ = tgt_proj.transform_points(src_crs, u, v).T
# Calculate the inverse geodesic for each pair of points in turn, and
# convert the start point's azimuth into a vector in the source coordinate
# system.
try:
radius, flattening = _get_geodesic_params(src_crs.globe)
geodesic = Geodesic(radius, flattening)
except ValueError:
geodesic = Geodesic()
dists, azms, _ = geodesic.inverse(startpoints, endpoints).T
azms_lon = np.sin(np.deg2rad(azms))
azms_lat = np.cos(np.deg2rad(azms))
azms_u, _ = src_crs.transform_vectors(
tgt_proj, proj_x[:-1], proj_y[:-1], azms_lon, azms_lat
)
# Use the grid longitude values and the geodesic azimuth to determine
# the points where the line should cross the 0/360 degree boundary, and
# in which direction
lwraps = np.logical_and(u[1:] > u[:-1], azms_u < 0)
rwraps = np.logical_and(u[1:] < u[:-1], azms_u > 0)
shifts = np.where(rwraps, 1, 0) - np.where(lwraps, 1, 0)
shift_vals = shifts.cumsum() * u_object.units.modulus
new_u = np.empty_like(u)
new_u[0] = u[0]
new_u[1:] = u[1:] + shift_vals
return new_u
def _draw_1d_from_points(draw_method_name, arg_func, *args, **kwargs):
# NB. In the interests of clarity we use "u" to refer to the horizontal
# axes on the matplotlib plot and "v" for the vertical axes.
# retrieve the objects that are plotted on the horizontal and vertical
# axes (cubes or coordinates) and their respective values, along with the
# argument tuple with these objects removed
u_object, v_object, u, v, args = _get_plot_objects(args)
# Track numpy arrays to use for the actual plotting.
plot_arrays = []
# Map axis name to associated values.
string_axes = {}
for values, axis_name in zip([u, v], ["xaxis", "yaxis"]):
# Replace any string coordinates with "index" coordinates.
if values.dtype.char in "SU":
if values.ndim != 1:
msg = "Multi-dimensional string coordinates are not supported."
raise ValueError(msg)
plot_arrays.append(np.arange(values.size))
string_axes[axis_name] = values
else:
plot_arrays.append(values)
u, v = plot_arrays
# if both u_object and v_object are coordinates then check if a map
# should be drawn
if (
isinstance(u_object, iris.coords.Coord)
and isinstance(v_object, iris.coords.Coord)
and _can_draw_map([v_object, u_object])
):
# Replace non-cartopy subplot/axes with a cartopy alternative and set
# the transform keyword.
kwargs = _ensure_cartopy_axes_and_determine_kwargs(
u_object, v_object, kwargs
)
if draw_method_name == "plot" and u_object.standard_name not in (
"projection_x_coordinate",
"projection_y_coordinate",
):
u = _shift_plot_sections(u_object, u, v)
axes = kwargs.pop("axes", None)
draw_method = getattr(axes if axes else plt, draw_method_name)
if arg_func is not None:
args, kwargs = arg_func(u, v, *args, **kwargs)
result = draw_method(*args, **kwargs)
else:
result = draw_method(u, v, *args, **kwargs)
# Apply tick labels for string coordinates.
_string_coord_axis_tick_labels(string_axes, axes)
# Invert y-axis if necessary.
_invert_yaxis(v_object, axes)
return result
def _replace_axes_with_cartopy_axes(cartopy_proj):
"""
Replace non-cartopy subplot/axes with a cartopy alternative
based on the provided projection. If the current axes are already an
instance of :class:`cartopy.mpl.geoaxes.GeoAxes` then no action is taken.
"""
ax = plt.gca()
if not isinstance(ax, cartopy.mpl.geoaxes.GeoAxes):
fig = plt.gcf()
if isinstance(ax, matplotlib.axes.SubplotBase):
_ = fig.add_subplot(
ax.get_subplotspec(),
projection=cartopy_proj,
title=ax.get_title(),
xlabel=ax.get_xlabel(),
ylabel=ax.get_ylabel(),
)
else:
_ = fig.add_axes(
projection=cartopy_proj,
title=ax.get_title(),
xlabel=ax.get_xlabel(),
ylabel=ax.get_ylabel(),
)
# delete the axes which didn't have a cartopy projection
fig.delaxes(ax)
def _ensure_cartopy_axes_and_determine_kwargs(x_coord, y_coord, kwargs):
"""
Replace the current non-cartopy axes with :class:`cartopy.mpl.GeoAxes`
and return the appropriate kwargs dict based on the provided coordinates
and kwargs.
"""
# Determine projection.
if x_coord.coord_system != y_coord.coord_system:
raise ValueError(
"The X and Y coordinates must have equal coordinate" " systems."
)
cs = x_coord.coord_system
if cs is not None:
cartopy_proj = cs.as_cartopy_projection()
else:
cartopy_proj = ccrs.PlateCarree()
# Ensure the current axes are a cartopy.mpl.GeoAxes instance.
axes = kwargs.get("axes")
if axes is None:
if (
isinstance(cs, iris.coord_systems.RotatedGeogCS)
and x_coord.points.max() > 180
and x_coord.points.max() < 360
and x_coord.points.min() > 0
):
# The RotatedGeogCS has 0 - 360 extent, different from the
# assumptions made by Cartopy: rebase longitudes for the map axes
# to set the datum longitude to the International Date Line.
cs_kwargs = cs._ccrs_kwargs()
cs_kwargs["central_rotated_longitude"] = 180.0
adapted_cartopy_proj = ccrs.RotatedPole(**cs_kwargs)
_replace_axes_with_cartopy_axes(adapted_cartopy_proj)
else:
_replace_axes_with_cartopy_axes(cartopy_proj)
elif axes and not isinstance(axes, cartopy.mpl.geoaxes.GeoAxes):
raise TypeError(
"The supplied axes instance must be a cartopy " "GeoAxes instance."
)
# Set the "from transform" keyword.
if "transform" in kwargs:
raise ValueError(
"The 'transform' keyword is not allowed as it "
"automatically determined from the coordinate "
"metadata."
)
new_kwargs = kwargs.copy()
new_kwargs["transform"] = cartopy_proj
return new_kwargs
def _check_geostationary_coords_and_convert(x, y, kwargs):
# Geostationary stores projected coordinates as scanning angles (
# radians), in line with CF definition (this behaviour is unique to
# Geostationary). Before plotting, must be converted by multiplying by
# satellite height.
x, y = (i.copy() for i in (x, y))
transform = kwargs.get("transform")
if isinstance(transform, cartopy.crs.Geostationary):
satellite_height = transform.proj4_params["h"]
for i in (x, y):
i *= satellite_height
return x, y
def _map_common(
draw_method_name, arg_func, mode, cube, plot_defn, *args, **kwargs
):
"""
Draw the given cube on a map using its points or bounds.
"Mode" parameter will switch functionality between POINT or BOUND plotting.
"""
# Generate 2d x and 2d y grids.
y_coord, x_coord = plot_defn.coords
if mode == iris.coords.POINT_MODE:
if x_coord.ndim == y_coord.ndim == 1:
x, y = _meshgrid(x_coord.points, y_coord.points)
elif x_coord.ndim == y_coord.ndim == 2:
x = x_coord.points
y = y_coord.points
else:
raise ValueError("Expected 1D or 2D XY coords")
else:
if not x_coord.ndim == y_coord.ndim == 2:
try:
x, y = _meshgrid(
x_coord.contiguous_bounds(), y_coord.contiguous_bounds()
)
# Exception translation.
except iris.exceptions.CoordinateMultiDimError:
raise ValueError(
"Expected two 1D coords. Could not get XY"
" grid from bounds. X or Y coordinate not"
" 1D."
)
except ValueError:
raise ValueError(
"Could not get XY grid from bounds. "
"X or Y coordinate doesn't have 2 bounds "
"per point."
)
else:
x = x_coord.contiguous_bounds()
y = y_coord.contiguous_bounds()
# Obtain the data array.
data = cube.data
if plot_defn.transpose:
data = data.T
# If we are global, then append the first column of data the array to the
# last (and add 360 degrees) NOTE: if it is found that this block of code
# is useful in anywhere other than this plotting routine, it may be better
# placed in the CS.
if getattr(x_coord, "circular", False):
_, direction = iris.util.monotonic(
x_coord.points, return_direction=True
)
y = np.append(y, y[:, 0:1], axis=1)
x = np.append(x, x[:, 0:1] + 360 * direction, axis=1)
data = ma.concatenate([data, data[:, 0:1]], axis=1)
if "_v_data" in kwargs:
v_data = kwargs["_v_data"]
v_data = ma.concatenate([v_data, v_data[:, 0:1]], axis=1)
kwargs["_v_data"] = v_data
# Replace non-cartopy subplot/axes with a cartopy alternative and set the
# transform keyword.
kwargs = _ensure_cartopy_axes_and_determine_kwargs(
x_coord, y_coord, kwargs
)
# Make Geostationary coordinates plot-able.
x, y = _check_geostationary_coords_and_convert(x, y, kwargs)
if arg_func is not None:
new_args, kwargs = arg_func(x, y, data, *args, **kwargs)
else:
new_args = (x, y, data) + args
# Draw the contour lines/filled contours.
axes = kwargs.pop("axes", None)
plotfn = getattr(axes if axes else plt, draw_method_name)
return plotfn(*new_args, **kwargs)
def contour(cube, *args, **kwargs):
"""
Draws contour lines based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.contour` for details of other valid
keyword arguments.
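    For example (an illustrative sketch only, not from the original
    docstring; assumes ``cube`` is a 2-D cube with latitude and longitude
    coordinates)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.contour(cube, coords=["longitude", "latitude"])
        plt.show()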
"""
result = _draw_2d_from_points("contour", None, cube, *args, **kwargs)
return result
def contourf(cube, *args, **kwargs):
"""
Draws filled contours based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.contourf` for details of other valid
keyword arguments.
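    For example (an illustrative sketch only; assumes ``cube`` is a 2-D
    cube, with the choice of 20 filled levels made up here)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        cf = iplt.contourf(cube, 20)
        plt.colorbar(cf)
        plt.show()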
"""
coords = kwargs.get("coords")
kwargs.setdefault("antialiased", True)
result = _draw_2d_from_points("contourf", None, cube, *args, **kwargs)
# Matplotlib produces visible seams between anti-aliased polygons.
# But if the polygons are virtually opaque then we can cover the seams
# by drawing anti-aliased lines *underneath* the polygon joins.
# Figure out the alpha level for the contour plot
if result.alpha is None:
alpha = result.collections[0].get_facecolor()[0][3]
else:
alpha = result.alpha
# If the contours are anti-aliased and mostly opaque then draw lines under
# the seams.
if result.antialiased and alpha > 0.95:
levels = result.levels
colors = [c[0] for c in result.tcolors]
if result.extend == "neither":
levels = levels[1:-1]
colors = colors[:-1]
elif result.extend == "min":
levels = levels[:-1]
colors = colors[:-1]
elif result.extend == "max":
levels = levels[1:]
colors = colors[:-1]
else:
colors = colors[:-1]
if len(levels) > 0:
# Draw the lines just *below* the polygons to ensure we minimise
# any boundary shift.
zorder = result.collections[0].zorder - 0.1
axes = kwargs.get("axes", None)
contour(
cube,
levels=levels,
colors=colors,
antialiased=True,
zorder=zorder,
coords=coords,
axes=axes,
)
# Restore the current "image" to 'result' rather than the mappable
# resulting from the additional call to contour().
if axes:
axes._sci(result)
else:
plt.sci(result)
return result
def default_projection(cube):
"""
Return the primary map projection for the given cube.
Using the returned projection, one can create a cartopy map with::
import matplotlib.pyplot as plt
        ax = plt.axes(projection=default_projection(cube))
"""
# XXX logic seems flawed, but it is what map_setup did...
cs = cube.coord_system("CoordSystem")
projection = cs.as_cartopy_projection() if cs else None
return projection
def default_projection_extent(cube, mode=iris.coords.POINT_MODE):
"""
Return the cube's extents ``(x0, x1, y0, y1)`` in its default projection.
Keyword arguments:
* mode - Either ``iris.coords.POINT_MODE`` or ``iris.coords.BOUND_MODE``.
Triggers whether the extent should be representative of the cell
points, or the limits of the cell's bounds.
The default is iris.coords.POINT_MODE.
"""
extents = cartography._xy_range(cube, mode)
xlim = extents[0]
ylim = extents[1]
return tuple(xlim) + tuple(ylim)
def _fill_orography(cube, coords, mode, vert_plot, horiz_plot, style_args):
# Find the orography coordinate.
orography = cube.coord("surface_altitude")
if coords is not None:
plot_defn = _get_plot_defn_custom_coords_picked(
cube, coords, mode, ndims=2
)
else:
plot_defn = _get_plot_defn(cube, mode, ndims=2)
v_coord, u_coord = plot_defn.coords
# Find which plot coordinate corresponds to the derived altitude, so that
# we can replace altitude with the surface altitude.
if v_coord and v_coord.standard_name == "altitude":
# v is altitude, so plot u and orography with orog in the y direction.
result = vert_plot(u_coord, orography, style_args)
elif u_coord and u_coord.standard_name == "altitude":
# u is altitude, so plot v and orography with orog in the x direction.
result = horiz_plot(v_coord, orography, style_args)
else:
raise ValueError(
"Plot does not use hybrid height. One of the "
"coordinates to plot must be altitude, but %s and %s "
"were given." % (u_coord.name(), v_coord.name())
)
return result
def orography_at_bounds(cube, facecolor="#888888", coords=None, axes=None):
"""Plots orography defined at cell boundaries from the given Cube."""
# XXX Needs contiguous orography corners to work.
raise NotImplementedError(
"This operation is temporarily not provided "
"until coordinates can expose 2d contiguous "
"bounds (corners)."
)
style_args = {"edgecolor": "none", "facecolor": facecolor}
def vert_plot(u_coord, orography, style_args):
u = u_coord.contiguous_bounds()
left = u[:-1]
height = orography.points
width = u[1:] - left
plotfn = axes.bar if axes else plt.bar
return plotfn(left, height, width, **style_args)
def horiz_plot(v_coord, orography, style_args):
v = v_coord.contiguous_bounds()
bottom = v[:-1]
width = orography.points
height = v[1:] - bottom
plotfn = axes.barh if axes else plt.barh
return plotfn(bottom, width, height, **style_args)
return _fill_orography(
cube, coords, iris.coords.BOUND_MODE, vert_plot, horiz_plot, style_args
)
def orography_at_points(cube, facecolor="#888888", coords=None, axes=None):
"""Plots orography defined at sample points from the given Cube."""
style_args = {"facecolor": facecolor}
def vert_plot(u_coord, orography, style_args):
x = u_coord.points
y = orography.points
plotfn = axes.fill_between if axes else plt.fill_between
return plotfn(x, y, **style_args)
def horiz_plot(v_coord, orography, style_args):
y = v_coord.points
x = orography.points
plotfn = axes.fill_betweenx if axes else plt.fill_betweenx
return plotfn(y, x, **style_args)
return _fill_orography(
cube, coords, iris.coords.POINT_MODE, vert_plot, horiz_plot, style_args
)
def outline(cube, coords=None, color="k", linewidth=None, axes=None):
"""
Draws cell outlines based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* color: None or mpl color The color of the cell outlines. If
None, the matplotlibrc setting patch.edgecolor is used by
default.
* linewidth: None or number The width of the lines showing the
cell outlines. If None, the default width in patch.linewidth
in matplotlibrc is used.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
"""
result = _draw_2d_from_bounds(
"pcolormesh",
cube,
facecolors="none",
edgecolors=color,
linewidth=linewidth,
antialiased=True,
coords=coords,
axes=axes,
)
# set the _is_stroked property to get a single color grid.
# See https://github.com/matplotlib/matplotlib/issues/1302
result._is_stroked = False
if hasattr(result, "_wrapped_collection_fix"):
result._wrapped_collection_fix._is_stroked = False
return result
def pcolor(cube, *args, **kwargs):
"""
Draws a pseudocolor plot based on the given 2-dimensional Cube.
The cube must have either two 1-dimensional coordinates or two
2-dimensional coordinates with contiguous bounds to plot the cube against.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
* contiguity_tolerance: The absolute tolerance used when checking for
contiguity between the bounds of the cells. Defaults to None.
See :func:`matplotlib.pyplot.pcolor` for details of other valid
keyword arguments.
"""
kwargs.setdefault("antialiased", True)
kwargs.setdefault("snap", False)
result = _draw_2d_from_bounds("pcolor", cube, *args, **kwargs)
return result
def pcolormesh(cube, *args, **kwargs):
"""
Draws a pseudocolor plot based on the given 2-dimensional Cube.
The cube must have either two 1-dimensional coordinates or two
2-dimensional coordinates with contiguous bounds to plot against each
other.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
* contiguity_tolerance: The absolute tolerance used when checking for
contiguity between the bounds of the cells. Defaults to None.
See :func:`matplotlib.pyplot.pcolormesh` for details of other
valid keyword arguments.
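    For example (an illustrative sketch only; assumes ``cube`` is a 2-D
    cube whose X and Y coordinates carry contiguous bounds, and the
    tolerance value is made up)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.pcolormesh(cube, contiguity_tolerance=1e-4)
        plt.show()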
"""
result = _draw_2d_from_bounds("pcolormesh", cube, *args, **kwargs)
return result
def points(cube, *args, **kwargs):
"""
Draws sample point positions based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or
coordinate names. Use the given coordinates as the axes for the
plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.scatter` for details of other valid
keyword arguments.
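    For example (an illustrative sketch only; assumes ``cube`` is a 2-D
    cube, with the marker size passed straight through to scatter)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.points(cube, s=4)
        plt.show()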
"""
def _scatter_args(u, v, data, *args, **kwargs):
return ((u, v) + args, kwargs)
return _draw_2d_from_points(
"scatter", _scatter_args, cube, *args, **kwargs
)
def _vector_component_args(x_points, y_points, u_data, *args, **kwargs):
"""
Callback from _draw_2d_from_points for 'quiver' and 'streamlines'.
Returns arguments (x, y, u, v), to be passed to the underlying matplotlib
call.
"u_data" will always be "u_cube.data".
The matching "v_cube.data" component is stored in kwargs['_v_data'].
"""
v_data = kwargs.pop("_v_data")
# Rescale u+v values for plot distortion.
crs = kwargs.get("transform", None)
if crs:
if not isinstance(crs, (ccrs.PlateCarree, ccrs.RotatedPole)):
msg = (
"Can only plot vectors provided in a lat-lon "
'projection, i.e. equivalent to "cartopy.crs.PlateCarree" '
'or "cartopy.crs.RotatedPole". This '
"cube coordinate system translates as Cartopy {}."
)
raise ValueError(msg.format(crs))
# Given the above check, the Y points must be latitudes.
# We therefore **assume** they are in degrees : I'm not sure this
# is wise, but all the rest of this plot code does that, e.g. in
# _map_common.
# TODO: investigate degree units assumptions, here + elsewhere.
# Implement a latitude scaling, but preserve the given magnitudes.
u_data, v_data = [arr.copy() for arr in (u_data, v_data)]
mags = np.sqrt(u_data * u_data + v_data * v_data)
v_data *= np.cos(np.deg2rad(y_points))
scales = mags / np.sqrt(u_data * u_data + v_data * v_data)
u_data *= scales
v_data *= scales
return ((x_points, y_points, u_data, v_data), kwargs)
def quiver(u_cube, v_cube, *args, **kwargs):
"""
Draws an arrow plot from two vector component cubes.
Args:
* u_cube, v_cube : (:class:`~iris.cube.Cube`)
u and v vector components. Must have same shape and units.
If the cubes have geographic coordinates, the values are treated as
true distance differentials, e.g. windspeeds, and *not* map coordinate
vectors. The components are aligned with the North and East of the
cube coordinate system.
.. Note:
At present, if u_cube and v_cube have geographic coordinates, then they
must be in a lat-lon coordinate system, though it may be a rotated one.
To transform wind values between coordinate systems, use
:func:`iris.analysis.cartography.rotate_vectors`.
To transform coordinate grid points, you will need to create
2-dimensional arrays of x and y values. These can be transformed with
:meth:`cartopy.crs.CRS.transform_points`.
Kwargs:
* coords: (list of :class:`~iris.coords.Coord` or string)
Coordinates or coordinate names. Use the given coordinates as the axes
for the plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.quiver` for details of other valid
keyword arguments.
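    For example (an illustrative sketch only; assumes ``u_cube`` and
    ``v_cube`` are matching 2-D wind-component cubes on a lat-lon grid)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.quiver(u_cube, v_cube, pivot="middle")
        plt.show()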
"""
#
# TODO: check u + v cubes for compatibility.
#
kwargs["_v_data"] = v_cube.data
return _draw_2d_from_points(
"quiver", _vector_component_args, u_cube, *args, **kwargs
)
def plot(*args, **kwargs):
"""
Draws a line plot based on the given cube(s) or coordinate(s).
The first one or two arguments may be cubes or coordinates to plot.
Each of the following is valid::
# plot a 1d cube against its dimension coordinate
plot(cube)
# plot a 1d coordinate
plot(coord)
# plot a 1d cube against a given 1d coordinate, with the cube
# values on the y-axis and the coordinate on the x-axis
plot(coord, cube)
# plot a 1d cube against a given 1d coordinate, with the cube
# values on the x-axis and the coordinate on the y-axis
plot(cube, coord)
# plot two 1d coordinates against one-another
plot(coord1, coord2)
# plot two 1d cubes against one-another
plot(cube1, cube2)
Kwargs:
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.plot` for details of additional valid
keyword arguments.
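    For example (an illustrative sketch only; assumes ``cube`` is a 1-D
    cube with a "time" dimension coordinate)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.plot(cube.coord("time"), cube, linewidth=2)
        plt.show()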
"""
if "coords" in kwargs:
raise TypeError(
'"coords" is not a valid plot keyword. Coordinates '
"and cubes may be passed as arguments for "
"full control of the plot axes."
)
_plot_args = None
return _draw_1d_from_points("plot", _plot_args, *args, **kwargs)
def scatter(x, y, *args, **kwargs):
"""
Draws a scatter plot based on the given cube(s) or coordinate(s).
Args:
* x: :class:`~iris.cube.Cube` or :class:`~iris.coords.Coord`
A cube or a coordinate to plot on the x-axis.
* y: :class:`~iris.cube.Cube` or :class:`~iris.coords.Coord`
A cube or a coordinate to plot on the y-axis.
Kwargs:
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.scatter` for details of additional
valid keyword arguments.
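    For example (an illustrative sketch only; assumes ``temperature`` and
    ``pressure`` are 1-D cubes defined on the same sample points)::
        import iris.plot as iplt
        import matplotlib.pyplot as plt
        iplt.scatter(temperature, pressure, marker="+")
        plt.show()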
"""
# here we are more specific about argument types than generic 1d plotting
if not isinstance(x, (iris.cube.Cube, iris.coords.Coord)):
raise TypeError("x must be a cube or a coordinate.")
if not isinstance(y, (iris.cube.Cube, iris.coords.Coord)):
raise TypeError("y must be a cube or a coordinate.")
args = (x, y) + args
_plot_args = None
return _draw_1d_from_points("scatter", _plot_args, *args, **kwargs)
# Provide convenience show method from pyplot
show = plt.show
def symbols(x, y, symbols, size, axes=None, units="inches"):
"""
Draws fixed-size symbols.
See :mod:`iris.symbols` for available symbols.
Args:
* x: iterable
The x coordinates where the symbols will be plotted.
* y: iterable
The y coordinates where the symbols will be plotted.
* symbols: iterable
The symbols (from :mod:`iris.symbols`) to plot.
* size: float
The symbol size in `units`.
Kwargs:
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
* units: ['inches', 'points']
The unit for the symbol size.
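    For example (an illustrative sketch only; the positions are made up and
    the use of ``iris.symbols.CLOUD_COVER`` as the symbol source is an
    assumption about the available symbol set)::
        import iris.symbols
        import iris.plot as iplt
        covers = [iris.symbols.CLOUD_COVER[i] for i in (0, 4, 8)]
        iplt.symbols([0, 1, 2], [0, 0, 0], covers, size=0.2)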
"""
if axes is None:
axes = plt.gca()
offsets = np.array(list(zip(x, y)))
# XXX "match_original" doesn't work ... so brute-force it instead.
# PatchCollection constructor ignores all non-style keywords when using
# match_original
# See matplotlib.collections.PatchCollection.__init__
# Specifically matplotlib/collections line 1053
# pc = PatchCollection(symbols, offsets=offsets, transOffset=ax.transData,
# match_original=True)
facecolors = [p.get_facecolor() for p in symbols]
edgecolors = [p.get_edgecolor() for p in symbols]
linewidths = [p.get_linewidth() for p in symbols]
pc = mpl_collections.PatchCollection(
symbols,
offsets=offsets,
transOffset=axes.transData,
facecolors=facecolors,
edgecolors=edgecolors,
linewidths=linewidths,
)
if units == "inches":
scale = axes.figure.dpi
elif units == "points":
scale = axes.figure.dpi / 72.0
else:
raise ValueError("Unrecognised units: '%s'" % units)
pc.set_transform(mpl_transforms.Affine2D().scale(0.5 * size * scale))
axes.add_collection(pc)
axes.autoscale_view()
def citation(text, figure=None, axes=None):
"""
Add a text citation to a plot.
Places an anchored text citation in the bottom right
hand corner of the plot.
Args:
* text:
Citation text to be plotted.
Kwargs:
* figure:
Target :class:`matplotlib.figure.Figure` instance. Defaults
to the current figure if none provided.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
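    For example (an illustrative sketch only), the module-level Cynthia
    Brewer citation text can be added to the current plot with::
        import iris.plot as iplt
        iplt.citation(iplt.BREWER_CITE)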
"""
if text is not None and len(text):
if figure is None and not axes:
figure = plt.gcf()
anchor = AnchoredText(text, prop=dict(size=6), frameon=True, loc=4)
anchor.patch.set_boxstyle("round, pad=0, rounding_size=0.2")
axes = axes if axes else figure.gca()
axes.add_artist(anchor)
| lgpl-3.0 |