repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ryklith/pyltesim | plotting/plot_variance_of_user_rate_over_target_user_rate.py | 1 | 2170 | #!/usr/bin/env python
''' Plot the standard deviation of the delivered rate over the target rate
x axis: user target rate in Mbps
y axis: standard deviation of the achieved rate per user in Mbps
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "[email protected]"
__status__ = "Development"
def plot(filename):
""" Open data file, process, generate pdf and png"""
import numpy as np
import matplotlib.pyplot as plt
from utils import utils
# data comes in a csv
data = np.genfromtxt(filename, delimiter=',')/1e6 # Mbps
# first row is the x-axis (user target rate in Mbps)
x = data[0] # Mbps
fig = plt.figure()
ax1 = fig.add_subplot(111)
# second row is BA
ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10)
# ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10)
# ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10)
ax1.plot(x, data[2], '-b*', label='Random alignment', markersize=10)
# ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10)
# ax1.plot(x, data[5], '-yx', label='Random once', markersize=10)
ax1.plot(x, data[3], '-gD', label='P-persistent ranking', markersize=10)
# ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10)
ax1.plot(x, data[4], '-ms', label='DTX alignment with memory', markersize=10)
plt.axis( [1, 3, 0, 3])
plt.legend(loc='upper right', prop={'size':20})
plt.setp(ax1.get_xticklabels(), fontsize=20)
plt.setp(ax1.get_yticklabels(), fontsize=20)
xlabel = 'User target rate in Mbps'
ylabel = 'Standard deviation of \n achieved rate in Mbps'
title = 'Consumption over sum rate'
ax1.set_xlabel(xlabel,size=20)
ax1.set_ylabel(ylabel,size=20)
# plt.title(title)
plt.subplots_adjust(left=0.2)
plt.savefig(filename+'.pdf', format='pdf')
plt.savefig(filename+'.png', format='png')
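# Command-line entry point: expects the path to the CSV data file as the first argument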
if __name__ == '__main__':
import sys
filename = sys.argv[1]
plot(filename)
| gpl-2.0 |
josbys1/twitter-svm | calculations.py | 1 | 5501 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 15:01:36 2015
@author: josbys1
"""
import re
import time
import pprint
import random
from scipy import stats
import math
from credentials import keys
import tweepy
import numpy
import sys
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation,metrics,grid_search,linear_model
from TweetObj import Tweet
import matplotlib
import matplotlib.pyplot as plt
import plotcm
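# Persistence helpers: load and store Python objects (tweets, trained models) as joblib pickle files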
def readFromMemory(location):
return joblib.load("data/"+location +'.pkl')
def store(tweets,location):
joblib.dump(tweets,location+'.pkl')
def getX(tweets):
a=[]
for obj in tweets:
a.append(obj.text)
return a
def getY(tweets):
a=[]
for obj in tweets:
a.append(obj.author)
return numpy.asarray(a)
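# Bag-of-words features: fit a CountVectorizer on the tweet texts and attach each tweet's count vector to its Tweet object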
def vectorize(tweets):
vectorizer = CountVectorizer(analyzer='word')
fit_vectorizer = vectorizer.fit(getX(tweets))
ft = numpy.array(fit_vectorizer.transform(getX(tweets)).toarray())
print("Vectorized!")
for i in range(0, len(tweets)):
tweets[i].vector = ft[i]
return (fit_vectorizer,ft)
def split(tweets):
x=getY(tweets)
return vectorize(tweets),x
def gs(X,Y,folds,parameters):
cv=cross_validation.KFold(len(X), n_folds=folds,shuffle=True,random_state=None)
svr = SVC()
clf = grid_search.GridSearchCV(svr, parameters,cv=cv)
print("About to fit...")
clf.fit(X,Y)
pprint.pprint(clf.grid_scores_)
pprint.pprint(clf.best_params_)
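# Train a single classifier on a random train/test split and report accuracy plus a confusion matrix
# (note: despite the SVM name, a LogisticRegression model is currently used instead of LinearSVC)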
def regularSVM(X,Y,c,pctTest,shouldReturnMetrics):
#svm = LinearSVC(C=c);
svm=linear_model.LogisticRegression(C=c);
cv=X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X,Y, test_size=pctTest, random_state=None)
svm.fit(X_train,Y_train)
y_pred=svm.predict(X_test)
channels=svm.classes_
channels.sort()
getWrongValues(y_pred,Y_test,channels,shouldReturnMetrics,num=len(X))
return svm
def showCoefficients(svm,vectorizer):
#Deal with binary class coefficients
channels=svm.classes_
channels.sort()
for i in range(0,len(channels)):
coef=svm.coef_[i]
indices=numpy.argsort(coef)
sorted_coef=coef[indices]
sorted_features=numpy.array(vectorizer.get_feature_names())[indices]
print("Positive 5 FW for " + channels[i])
for y in range(len(sorted_coef)-5, len(sorted_coef)):
print(sorted_features[y])
print("\n")
print("Negative 5 FW for " + channels[i])
for x in range(0,5):
print(sorted_features[x])
print("\n")
def showBinaryCoefs(svm,vectorizer):
channels=svm.classes_
channels.sort()
coef=svm.coef_[0]
indices=numpy.argsort(coef)
sorted_coef=coef[indices]
sorted_features=numpy.array(vectorizer.get_feature_names())[indices]
print("Positive 10 feature weights for " + channels[0])
for x in range(0,10):
print(sorted_features[x])
print("Negative 10 feature weights for " + channels[0])
for y in range(len(sorted_coef)-10, len(sorted_coef)):
print(sorted_features[y])
def crossValidate(X,Y,folds=10,c=1):
svm=LinearSVC(C=c)
cv=cross_validation.KFold(len(X), n_folds=folds,shuffle=True,random_state=None)
for i in cross_validation.cross_val_score(svm,X,Y,cv=cv):
print(i)
def predict(x_test,model):
return model.predict(x_test)
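# Report accuracy and plot a normalized confusion matrix for the given predictions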
def getWrongValues(pred_values,y_test,channels,shouldReturnMetrics=True,num=0):
count_wrong=0
if(shouldReturnMetrics):
print("Accuracy percentage: " + str(metrics.accuracy_score(y_test, pred_values, normalize=True, sample_weight=None)))
# Compute confusion matrix
cm = confusion_matrix(pred_values, y_test,labels=channels)
numpy.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
#plotcm.plot_confusion_matrix(cm,channels,title="Confusion matrix: n=" + str(num/len(channels)),filename="cm"+(str(num/len(channels))))
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, numpy.newaxis]
plotcm.plot_confusion_matrix(cm_normalized, channels, title='Normalized confusion matrix, n='+str(num/len(channels)),filename="cm"+(str(num/len(channels)))+"norm.png")
def predictTweet(svm,vectorizer):
while True:
test=[re.sub(r"(?:\@|https?\:\/\/)\S+", "URL",input("Type a message: "))]
if(test[0]==-1):
return
v=vectorizer.transform(test).toarray()
print(v)
print(svm.predict(vectorizer.transform(test).toarray()))
z=sorted(zip(svm.classes_,svm.predict_proba(vectorizer.transform(test).toarray())[0]), key=lambda tup: tup[1])
for i in reversed(range(len(z)-4,len(z))):
print(z[i][0] + ": {0:.0f}%".format(z[i][1]*100))
def testOverN(X,Y,c,pctTest,channels,shouldReturnMetrics=False,increment=100):
for i in xrange(100, len(X), increment):
start = time.time()
svm = LinearSVC(C=c);
cv=X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X[:i],Y[:i], test_size=pctTest, random_state=None)
svm.fit(X_train,Y_train)
y_pred=svm.predict(X_test)
print(str(i) + "," + str(metrics.accuracy_score(Y_test, y_pred, normalize=True, sample_weight=None))+","+str(time.time()-start)) | mit |
artmusic0/theano-learning.part03 | Layer_Changes/LayerChange_release_v5/cnn_training_computation.py | 3 | 7532 | import os
import sys, getopt
import time
import numpy
import theano
import theano.tensor as T
from sklearn import preprocessing
from cnn import CNN
import pickle as cPickle
from logistic_sgd import LogisticRegression
def fit(data, labels, filename = 'weights_v5.pkl'):
fit_predict(data, labels, filename = filename, action = 'fit')
def predict(test_dataset, filename = 'weights_v5.pkl' ):
return fit_predict(data=[], labels=[], filename= filename, test_datasets=[test_dataset], action = 'predict')[0]
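# Core routine: build the CNN, then either train it and pickle the learned weights (action='fit')
# or restore the pickled weights and return class predictions for the test sets (action='predict')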
def fit_predict(data, labels, action, filename, test_datasets = [], learning_rate=0.1, n_epochs=100, nkerns=[20, 50, 90], batch_size=50, seed=8000):
rng = numpy.random.RandomState(seed)
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
index = T.lscalar() # index to a [mini]batch
if action=='fit':
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
if NUM_TRAIN % batch_size != 0: #if the last batch is not full, just don't use the remainder
whole = (NUM_TRAIN / batch_size) * batch_size
data = data[:whole]
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
# random permutation
indices = rng.permutation(NUM_TRAIN)
data, labels = data[indices, :], labels[indices]
# from every block of batch_size samples, hold out the last 20 for validation (e.g. a 30/20 train/validation split per block with the default batch_size of 50); the rest is used for training
is_train = numpy.array( ([0]* (batch_size - 20) + [1] * 20) * (NUM_TRAIN / batch_size))
# now we split the dataset to test and valid datasets
train_set_x, train_set_y = numpy.array(data[is_train==0]), labels[is_train==0]
valid_set_x, valid_set_y = numpy.array(data[is_train==1]), labels[is_train==1]
# compute number of minibatches
n_train_batches = len(train_set_y) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
epoch = T.scalar()
#index = T.lscalar() # index to a [mini]batch
#x = T.matrix('x') # the data is presented as rasterized images
#y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
# construct the CNN class
classifier = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
train_set_x = theano.shared(numpy.asarray(train_set_x, dtype=theano.config.floatX))
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x, dtype=theano.config.floatX))
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y, dtype=theano.config.floatX)), 'int32')
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
cost = classifier.layer4.negative_log_likelihood(y)
# create a list of gradients for all model parameters
grads = T.grad(cost, classifier.params)
# specify how to update the parameters of the model as a list of (variable, update expression) pairs
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(classifier.params, grads)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
###############
# TRAIN MODEL #
###############
print '... training'
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
# here is an example how to print the current value of a Theano variable: print test_set_x.shape.eval()
# start training
while (epoch < n_epochs):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (epoch) % 1 == 0 and minibatch_index==0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
###############
# PREDICTIONS #
###############
# save and load
f = file(filename, 'wb')
cPickle.dump(classifier.__getstate__(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
end_time = time.clock()
print >> sys.stderr, ('The code ran for %.2fm' % ((end_time - start_time) / 60.))
if action == 'predict':
# construct the CNN class
classifier_2 = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
print "...."
f = file(filename, 'rb')
classifier_2.__setstate__(cPickle.load(f))
f.close()
RET = []
for it in range(len(test_datasets)):
test_data = test_datasets[it]
N = len(test_data)
test_data = theano.shared(numpy.asarray(test_data, dtype=theano.config.floatX))
# just zeroes
test_labels = T.cast(theano.shared(numpy.asarray(numpy.zeros(batch_size), dtype=theano.config.floatX)), 'int32')
ppm = theano.function([index], classifier_2.layer3.pred_probs(),
givens={
x: test_data[index * batch_size: (index + 1) * batch_size],
y: test_labels
}, on_unused_input='warn')
# p : predictions, we need to take argmax, p is 3-dim: (# loop iterations x batch_size x number of classes, here 20)
p = [ppm(ii) for ii in xrange( N / batch_size)]
#p_one = sum(p, [])
#print p
p = numpy.array(p).reshape((N, 20))
#print p
p = numpy.argmax(p, axis=1)
p = p.astype(int)
RET.append(p)
return RET
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/demo_text_rotation_mode.py | 12 | 1410 |
#clf()
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
def test_rotation_mode(fig, mode, subplot_location):
ha_list = "left center right".split()
va_list = "top center baseline bottom".split()
grid = ImageGrid(fig, subplot_location,
nrows_ncols=(len(va_list), len(ha_list)),
share_all=True, aspect=True, #label_mode='1',
cbar_mode=None)
for ha, ax in zip(ha_list, grid.axes_row[-1]):
ax.axis["bottom"].label.set_text(ha)
grid.axes_row[0][1].set_title(mode, size="large")
for va, ax in zip(va_list, grid.axes_column[0]):
ax.axis["left"].label.set_text(va)
i = 0
for va in va_list:
for ha in ha_list:
ax = grid[i]
for axis in ax.axis.values():
axis.toggle(ticks=False, ticklabels=False)
ax.text(0.5, 0.5, "Tpg",
size="large", rotation=40,
bbox=dict(boxstyle="square,pad=0.",
ec="none", fc="0.5", alpha=0.5),
ha=ha, va=va,
rotation_mode=mode)
ax.axvline(0.5)
ax.axhline(0.5)
i += 1
if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5.5,4 ))
fig.clf()
test_rotation_mode(fig, "default", 121)
test_rotation_mode(fig, "anchor", 122)
plt.show()
| mit |
edonyM/toolkitem | fileprocess/emgui/pygui.py | 1 | 3839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2015-07-03 00:05
#
# Filename: pygui.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
import Tkinter as tkinter
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
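# Example usage (illustrative only, not executed by this script):
#   color = PyColor()
#   print(color.warningcolor + 'something went wrong' + color.endcolor)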
if __name__ == "__main__":
window = tkinter.Tk()
window.title("edony-tk")
window.geometry("300x300")
#create a label widget called 'lbl'
lbl = tkinter.Label(window, text="Label")
#create a text entry widget called 'ent'
ent = tkinter.Entry(window)
#create a button widget called btn
btn = tkinter.Button(window, text="Button")
#create a label widget called 'lbl'
lbl1 = tkinter.Label(window, text="Label")
#create a text entry widget called 'ent'
ent1 = tkinter.Entry(window)
#create a button widget called btn
btn1 = tkinter.Button(window, text="Button")
#pack (add) the widgets into the window
lbl.pack()
ent.pack()
btn.pack()
lbl1.pack()
ent1.pack()
btn1.pack()
window.mainloop()
| mit |
taliamo/Final_Project | organ_pitch/Scripts/pitch_munge2.py | 1 | 2360 | #T. Martz-Oberlander, 2015-11-15
#Script for wrangling pitch data into a dataframe with media and standard dev. of sound frequencies
# To call this script: $ python Scripts/pitch_munge.py Data/[input_filename] Data/[output_pitch_dataframe_name] Figures/[output_pitch_fig_name]
#Import useful libraries
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
# I define my arguments (input and output)
input_filename = sys.argv[1]
output_pitch_dataframe = sys.argv[2]
output_pitch_fig = sys.argv[3]
#I open my main function
def main():
#import pitch data file
pitch = pd.read_table(input_filename, sep=',')
#use date/time timestamp values
pitch['time'] = pd.to_datetime(pitch['time'])
#create new column for mean frequency from 9 frequency measurements
pitch['mean_freq'] = np.mean(pitch[['freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']], axis=1)
#Test to see if data is a float, and usable in a plot
def test_data_type(data):
'''Check to see if a column contains only floats'''
obs = data.dtype #check the dtype of the column passed into the test function
#print(obs)
exp = 'float64'
assert obs == 'float64', 'Data is not a float'
return
#Call the test function on the 'freq5' column in the 'pitch' dataframe
test_data_type(pitch['freq5'])
#do the same for standard deviation
pitch['stdev_freq'] = np.std(pitch['mean_freq'])
#select rows of the pitch dataframe for single div's (sections) of the chapel
organized_pitch = pitch.groupby(['div']).get_group('choir')
#save this dataframe as a file that can be called in later scripts
organized_pitch.to_csv(output_pitch_dataframe, sep=',')
#Function to plot the new dataframe for one chapel section
def make_plot(data):
'''Make line plot for measured pitch'''
#Plot figure of change in pitch over time
plt.figure(figsize=(8,5))
#Select data
fig = plt.plot(organized_pitch['time'], organized_pitch['mean_freq'], color = 'navy')
#Make title and labels for plot
plt.title('Pitch of C5 Pipe Organ Note')
plt.ylabel('Sound Frequency (Hz)')
plt.xlabel('Time of Sample Taken (Apr. 13, 16, and 17, 2010)')
#Save figure in Figures
plt.savefig(output_pitch_fig)
#Close visualization function
return()
#Call visualization function
make_plot(organized_pitch)
#close main function
main()
| mit |
Astroua/TurbuStat | Examples/paper_plots/test_fBM_models_vca_pspec_vel_4_dens_4.py | 2 | 6384 |
'''
Example VCA and Pspec for the fBM cubes that suffer the least from shot noise.
These are the cubes with velocity and density indices of -4.
'''
from spectral_cube import SpectralCube
from astropy.table import Table, Column
import numpy as np
import astropy.units as u
from astropy import constants as cc
import os
import matplotlib.pyplot as plt
import seaborn as sb
from turbustat.statistics import PowerSpectrum, VCA, SCF, PCA
col_pal = sb.color_palette()
plt.rcParams['axes.unicode_minus'] = False
# data_path = "/Volumes/Travel_Data/Turbulence/fBM_cubes/"
data_path = os.path.expanduser("~/MyRAID/Astrostat/TurbuStat_Paper/fBM_cubes/")
reps = range(4)
cube_size = 256
# Will want to vary the slice size.
pixs = [1, 2, 4, 8, 16, 32, 64, 128]
outputs = {"v_thick_exp": [],
"thin_exp": [],
"VCA": [],
"VCA_2D": [],
"pspec": [],
"pspec_2D": []}
def esquivel_max_k(sigma_L, chan_width, v_th, m):
return (sigma_L**(-2) * (chan_width**2 + 2 * v_th**2))**(-1. / m)
dens = 4
vel = 4
markers = ['D', 'o', 's', 'p', '*']
width = 8.75
# fig_ratio = (4.4 / 6.4) / 2
height = 5.07
figsize = (width, height)
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True,
figsize=figsize)
for rep in reps:
# Expected slopes
vca_exp = 0.5 * (9 - vel) if dens >= 3 else 0.5 * (2 * dens - vel + 3)
vca_thick_exp = dens if dens < 3 else 3 - 0.5 * (3 - vel)
pspec_exp = dens
name = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}"\
.format(np.abs(dens), np.abs(vel), rep)
filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}.fits"\
.format(np.abs(dens), np.abs(vel), rep, cube_size)
cube = SpectralCube.read(os.path.join(data_path, filename))
cube.allow_huge_operations = True
mom0 = cube.moment0()
vca_slopes = []
vca_slopes_2D = []
vel_size = []
# Estimate the upper limit freq. by Eq 14 in Esquivel+2003
chan_width = np.diff(cube.spectral_axis[:2])[0].value
T = 100 * u.K
v_th = np.sqrt(cc.k_B * T / (1.4 * cc.m_p)).to(u.km / u.s)
chan_width = np.diff(cube.spectral_axis[:2])[0].value
high_cut = esquivel_max_k(10., chan_width, v_th.value, vel - 3.) / \
float(cube.shape[1])
print(1 / high_cut)
# Largest limit at 2 pix
if high_cut > 0.25:
high_cut = 0.25
# Limit to ~1/5 of the box
if ((1 / high_cut) / cube_size) > 0.1:
high_cut = 1 / (0.1 * cube_size)
# vca = VCA(cube).run(low_cut=1 / (32 * u.pix), high_cut=high_cut / u.pix,
# verbose=True,
# radial_pspec_kwargs={'binsize': 4.})
# print(vca.slope, vca.slope2D, vca_exp)
for sl_pix in pixs:
if sl_pix == 1:
cube_dsamp = cube
else:
cube_dsamp = cube.downsample_axis(sl_pix, axis=0)
vca = VCA(cube_dsamp).run(low_cut=1 / (100 * u.pix),
high_cut=(1 / 4.) / u.pix,
fit_2D_kwargs={"fix_ellip_params": True},
verbose=False,
fit_2D=True)
# plt.draw()
# input((sl_pix, vca.slope, vca.slope2D, vca.ellip2D, vca.ellip2D_err))
# plt.clf()
vca_slopes.append(vca.slope)
vca_slopes_2D.append(vca.slope2D)
vel_size.append(np.abs(np.diff(cube_dsamp.spectral_axis.value))[:1])
# pspec = PowerSpectrum(mom0).run(low_cut=1 / (64 * u.pix),
# high_cut=0.5 / u.pix, verbose=False)
pspec = PowerSpectrum(mom0).run(low_cut=1 / (100 * u.pix),
high_cut=(1 / 4.) / u.pix,
fit_2D_kwargs={"fix_ellip_params": True},
verbose=False)
outputs['VCA'].append(vca_slopes)
outputs['VCA_2D'].append(vca_slopes_2D)
outputs['pspec'].append(pspec.slope)
outputs['pspec_2D'].append(pspec.slope2D)
vel_size.append(np.ptp(cube.spectral_axis.value))
axes[0].semilogx(vel_size,
vca_slopes + [pspec.slope], '-',
label='{}'.format(rep + 1),
marker=markers[rep])
axes[1].semilogx(vel_size,
vca_slopes_2D + [pspec.slope2D], '-',
marker=markers[rep])
axes[0].axhline(-vca_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10)
axes[0].axhline(-vca_thick_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10, linestyle='-')
axes[0].axhline(-pspec_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10, linestyle='-')
axes[0].annotate("Thin Slice", xy=(10., -2.5),
bbox={"boxstyle": "round", "facecolor": "w",
"edgecolor": 'k'},
horizontalalignment='left',
verticalalignment='center')
axes[0].annotate("Thick Slice", xy=(0.2, -3.5),
bbox={"boxstyle": "round", "facecolor": "w"},
horizontalalignment='left',
verticalalignment='center')
axes[0].annotate("Very Thick Slice", xy=(0.2, -4),
bbox={"boxstyle": "round", "facecolor": "w"},
horizontalalignment='left',
verticalalignment='center')
axes[1].axhline(-vca_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10)
axes[1].axhline(-vca_thick_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10, linestyle='-')
axes[1].axhline(-pspec_exp, alpha=0.5, linewidth=5,
color='k', zorder=-10, linestyle='-')
axes[0].axvline(v_th.value, alpha=0.9, color=col_pal[5], zorder=-1,
linestyle='--', linewidth=5)
axes[1].axvline(v_th.value, alpha=0.9, color=col_pal[5], zorder=-1,
linestyle='--', linewidth=5)
axes[0].set_ylabel("Power Spectrum Index")
# Change to figure text in centre
fig.text(0.5, 0.04, "Slice thickness (km/s)", ha='center')
axes[0].set_title("1D Fit")
axes[1].set_title("2D Fit")
fig.legend(frameon=True, loc=(0.38, 0.5), framealpha=0.9)
axes[0].grid()
axes[1].grid()
axes[0].set_ylim([-4.1, -2.4])
# plt.tight_layout()
plt.subplots_adjust(wspace=0.02, bottom=0.18)
plt.savefig("../figures/vca_slice_thickness_recovery.png")
plt.savefig("../figures/vca_slice_thickness_recovery.pdf")
plt.close()
| mit |
jayflo/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
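# zero_based="auto" should infer one-based indexing when no zero index is present, and switch to
# zero-based (shared across the files loaded together) as soon as any file uses feature index 0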
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
psychopy/psychopy | psychopy/tests/test_data/test_xlsx.py | 2 | 3554 | """Tests for psychopy.data.DataHandler"""
from __future__ import print_function
from builtins import object
import os, shutil
import numpy as np
from tempfile import mkdtemp
import pytest
from psychopy import data
from psychopy.tests import utils
thisDir,filename = os.path.split(os.path.abspath(__file__))
fixturesPath = os.path.join(thisDir,'..','data')
class TestXLSX(object):
def setup_class(self):
self.temp_dir = mkdtemp(prefix='psychopy-tests-testdata')
self.name = os.path.join(self.temp_dir,'testXlsx')
self.fullName = self.name+'.xlsx'
self.random_seed = 100
def teardown_class(self):
shutil.rmtree(self.temp_dir)
def test_TrialHandlerAndXLSX(self):
"""Currently tests the contents of xslx file against known good example
"""
conds = data.importConditions(os.path.join(fixturesPath,
'trialTypes.xlsx'))
trials = data.TrialHandler(trialList=conds,
seed=self.random_seed,
nReps=2, autoLog=False)
responses = [1,1,None,3,2,3, 1,3,2,2,1,1]
rts = [0.1,0.1,None,0.3,0.2,0.3, 0.1,0.3,0.2,0.2,0.1,0.1]
for trialN, trial in enumerate(trials):
if responses[trialN] is None:
continue
trials.addData('resp', responses[trialN])
trials.addData('rt',rts[trialN])
trials.saveAsExcel(self.name)# '.xlsx' should be added automatically
trials.saveAsText(self.name, delim=',')# '.xlsx' added automatically
trials.saveAsWideText(os.path.join(self.temp_dir,'actualXlsx'))
# Make sure the file is there
assert os.path.isfile(self.fullName)
#compare with known good file
utils.compareXlsxFiles(self.fullName,
os.path.join(fixturesPath,'corrXlsx.xlsx'))
def test_TrialTypeImport():
def checkEachtrial(fromCSV, fromXLSX):
for trialN, trialCSV in enumerate(fromCSV):
trialXLSX = fromXLSX[trialN]
assert list(trialXLSX.keys()) == list(trialCSV.keys())
for header in trialCSV:
if trialXLSX[header] != trialCSV[header]:
print(header, trialCSV[header], trialXLSX[header])
assert trialXLSX[header] == trialCSV[header]
fromCSV = data.importConditions(os.path.join(fixturesPath,
'trialTypes.csv'))
# use pandas/xlrd once
fromXLSX = data.importConditions(os.path.join(fixturesPath,
'trialTypes.xlsx'))
checkEachtrial(fromCSV, fromXLSX)
# then pretend it doesn't exist to force use of openpyxl
haveXlrd = data.haveXlrd
data.haveXlrd = False
fromXLSX = data.importConditions(os.path.join(fixturesPath,
'trialTypes.xlsx'))
checkEachtrial(fromCSV, fromXLSX)
data.haveXlrd = haveXlrd # return to what it was
def test_ImportCondsUnicode():
if not data.haveXlrd:
# open pyxl thinks the right-to-left file has blanks in header
pytest.skip("We know this fails with openpyxl")
fromXLSX = data.importConditions(os.path.join(fixturesPath,
'right_to_left_unidcode.xlsx'))
assert u'\u05d2\u05d9\u05dc' in fromXLSX[0]['question']
if __name__ == '__main__':
t = TestXLSX()
t.setup_class()
t.test_TrialHandlerAndXLSX()
t.teardown_class()
| gpl-3.0 |
nrhine1/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
BassantMorsi/finderApp | lib/python2.7/site-packages/numpy/lib/twodim_base.py | 34 | 25580 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
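# fill the k-th diagonal by writing ones into the flattened array with a stride of M+1,
# starting from an offset that depends on the sign of k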
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
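Examples
--------
A minimal usage sketch; the array ``a`` below is illustrative only:
>>> a = np.arange(9).reshape(3, 3)
>>> il = np.tril_indices_from(a)
>>> a[il]
array([0, 3, 4, 6, 7, 8])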
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
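Examples
--------
A minimal usage sketch; the array ``a`` below is illustrative only:
>>> a = np.arange(9).reshape(3, 3)
>>> iu = np.triu_indices_from(a)
>>> a[iu]
array([0, 1, 2, 4, 5, 8])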
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
maniteja123/numpy | numpy/core/code_generators/ufunc_docstrings.py | 9 | 90842 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
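# A small usage sketch of this registry (illustrative only): each call such as
# add_newdoc('numpy.core.umath', 'absolute', "...") stores its text under the
# '.'-joined key, so it can later be retrieved with
# get('numpy.core.umath.absolute') when the C-level ufuncs are generated.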
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `x`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros(rad.shape)
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled; use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`; the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function, so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
remainder : Remainder complementary to floor_divide.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Likewise, positive infinity is not equivalent to negative infinity, but
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if the
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is also accurate for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be numpy.ones_like, but a dedicated function has
since been written for consistency with the other *_like functions.
It is now only used internally in a limited fashion.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
Returns a scalar if both `x1` and `x2` are scalars.
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the cube-root of
each element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself, it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
alivecor/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 21 | 53471 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = variables.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
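# Illustrative sketch (not part of the original test file): the basic
# fit/evaluate/predict round trip that the test cases below exercise, built
# only from the helper model_fn and input_fn definitions earlier in this file.
def _demo_estimator_round_trip():
  est = estimator.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=1)
  scores = est.evaluate(input_fn=boston_eval_fn, steps=1)  # dict with 'loss'
  predictions = list(est.predict(
      input_fn=functools.partial(boston_input_fn, num_epochs=1),
      as_iterable=True))
  return scores, predictions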
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
variables.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass an empty array and expect it to remain empty after calling
# fit and evaluate. This requires the implementation to copy the array
# internally if it adds any hooks.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={'MSE': _streaming_mean_squared_error_histogram})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(
['bogus_lookup', 'feature'],
[compat.as_str_any(x) for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
cemfi/hannds | hannds_data.py | 1 | 12589 | """Provides training, validation and test data."""
import math
from collections import namedtuple
import os
import numpy as np
import pretty_midi
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler
import hannds_files
def train_valid_test_data_windowed(len_train_sequence, cv_partition=1, debug=False):
"""Training, validation and test data in the categorical windowed format"""
module_directory = os.path.dirname(os.path.abspath(__file__))
hannds_dir = os.path.join(module_directory, 'data-hannds')
make_npz_files(overwrite=False, midi_dir=hannds_dir, subdir='windowed', convert_func=convert_windowed)
all_files = hannds_files.TrainValidTestFiles(hannds_dir)
all_files.get_partition(cv_partition)
train_data = HanndsDataset(hannds_dir, all_files.train_files, 'windowed', len_sequence=len_train_sequence,
debug=debug)
valid_data = HanndsDataset(hannds_dir, all_files.valid_files, 'windowed', len_sequence=-1, debug=debug)
test_data = HanndsDataset(hannds_dir, all_files.test_files, 'windowed', len_sequence=-1, debug=debug)
return train_data, valid_data, test_data
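# Hypothetical usage sketch (not part of the original module), mirroring main()
# further below: the windowed dataset is typically wrapped in a DataLoader
# together with the ContinuationSampler defined later in this file, e.g.
#   train, valid, test = train_valid_test_data_windowed(len_train_sequence=100)
#   sampler = ContinuationSampler(len(train), batch_size=50)
#   loader = DataLoader(train, batch_size=50, sampler=sampler)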
def train_valid_test_data_windowed_tanh(len_train_sequence, cv_partition=1, debug=False):
"""Training, validation and test data in the windowed +/-1 format"""
module_directory = os.path.dirname(os.path.abspath(__file__))
hannds_dir = os.path.join(module_directory, 'data-hannds')
make_npz_files(overwrite=False, midi_dir=hannds_dir, subdir='windowed_tanh', convert_func=convert_windowed_tanh)
all_files = hannds_files.TrainValidTestFiles(hannds_dir)
all_files.get_partition(cv_partition)
train_data = HanndsDataset(hannds_dir, all_files.train_files, 'windowed_tanh', len_sequence=len_train_sequence,
debug=debug)
valid_data = HanndsDataset(hannds_dir, all_files.valid_files, 'windowed_tanh', len_sequence=-1, debug=debug)
test_data = HanndsDataset(hannds_dir, all_files.test_files, 'windowed_tanh', len_sequence=-1, debug=debug)
return train_data, valid_data, test_data
def train_valid_test_data_event(len_train_sequence, cv_partition=1, debug=False):
"""Training, validation and test data in the MIDI event format"""
module_directory = os.path.dirname(os.path.abspath(__file__))
hannds_dir = os.path.join(module_directory, 'data-hannds')
make_npz_files(overwrite=False, midi_dir=hannds_dir, subdir='event', convert_func=convert_event)
all_files = hannds_files.TrainValidTestFiles(hannds_dir)
all_files.get_partition(cv_partition)
train_data = HanndsDataset(hannds_dir, all_files.train_files, 'event', len_sequence=len_train_sequence, debug=debug)
valid_data = HanndsDataset(hannds_dir, all_files.valid_files, 'event', len_sequence=-1, debug=debug)
test_data = HanndsDataset(hannds_dir, all_files.test_files, 'event', len_sequence=-1, debug=debug)
return train_data, valid_data, test_data
def train_valid_test_data_magenta(len_train_sequence, cv_partition=1, debug=False):
"""Training, validation and test data in the magenta project's MIDI event format for Performance RNN"""
module_directory = os.path.dirname(os.path.abspath(__file__))
hannds_dir = os.path.join(module_directory, 'data-hannds')
make_npz_files(overwrite=False, midi_dir=hannds_dir, subdir='magenta', convert_func=convert_magenta)
all_files = hannds_files.TrainValidTestFiles(hannds_dir)
all_files.get_partition(cv_partition)
train_data = HanndsDataset(hannds_dir, all_files.train_files, 'magenta', len_sequence=len_train_sequence,
debug=debug)
valid_data = HanndsDataset(hannds_dir, all_files.valid_files, 'magenta', len_sequence=-1, debug=debug)
test_data = HanndsDataset(hannds_dir, all_files.test_files, 'magenta', len_sequence=-1, debug=debug)
return train_data, valid_data, test_data
WINDOWED_NOT_PLAYED = 0
WINDOWED_LEFT_HAND = 1
WINDOWED_RIGHT_HAND = 2
WINDOWED_TANH_LEFT_HAND = -1
WINDOWED_TANH_RIGHT_HAND = +1
WINDOWED_TANH_NOT_PLAYED = 0
def make_npz_files(overwrite, midi_dir, subdir, convert_func):
midi_files = hannds_files.all_midi_files(midi_dir, absolute_path=True)
npy_paths = hannds_files.npz_files_for_midi(midi_dir, midi_files, subdir)
for midi_file, npy_path in zip(midi_files, npy_paths):
if overwrite or not os.path.exists(npy_path):
print("Converting file '" + midi_file + "'")
midi = pretty_midi.PrettyMIDI(midi_file)
X, Y = convert_func(midi)
np.savez(npy_path, X=X, Y=Y)
def convert_windowed(midi):
ms_window = 20
samples_per_sec = 1000 // ms_window
midi_data = midi.instruments[0], midi.instruments[1]
# Generate empty numpy arrays
n_windows = math.ceil(midi.get_end_time() * samples_per_sec)
hands = np.zeros((
n_windows, # Number of windows to calculate
2, # Left and right hand = 2 hands
88 # 88 keys on a piano
), dtype=np.bool)
# Fill array with data
for hand, midi_hand in enumerate(midi_data):
for note in midi_hand.notes:
start = int(math.floor(note.start * samples_per_sec))
end = int(math.ceil(note.end * samples_per_sec))
hands[start:end, hand, note.pitch - 21] = True
data = hands
batch_size = n_windows
# Merge both hands in a single array
X = np.logical_or(
data[:, 0, :],
data[:, 1, :]
)
Y = np.full((batch_size, 88), WINDOWED_NOT_PLAYED)
Y[data[:, 0, :]] = WINDOWED_LEFT_HAND
Y[data[:, 1, :]] = WINDOWED_RIGHT_HAND
return X.astype(np.float32), Y.astype(np.longlong)
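# Illustrative example (not in the original source) of the windowing above,
# using made-up numbers: a note with start = 0.05 s and end = 0.11 s under
# 20 ms windows (samples_per_sec = 50) gives start = floor(2.5) = 2 and
# end = ceil(5.5) = 6, so hands[2:6, hand, pitch - 21] is set, i.e. the note
# occupies windows 2, 3, 4 and 5.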
def convert_windowed_tanh(midi):
X, Y = convert_windowed(midi)
Y[Y == WINDOWED_LEFT_HAND] = WINDOWED_TANH_LEFT_HAND
Y[Y == WINDOWED_RIGHT_HAND] = WINDOWED_TANH_RIGHT_HAND
return X, Y.astype(np.float32)
def convert_event(midi):
num_notes = 0
for instrument in midi.instruments:
num_notes += len(instrument.notes)
# Generate empty numpy array
events = np.empty((2 * num_notes, 5))
# Generate event list
# Format:[ 0 , 1 , 2 , 3 , 4 ]
# [timestamp, midi_pitch/127, is_start, is_end, left|right]
i = 0
for hand, instrument in enumerate(midi.instruments):
notes = instrument.notes
for note in notes:
events[i:i + 2, 1] = note.pitch / 127
events[i:i + 2, 4] = hand # 0 = Right, 1 = Left
events[i, 0] = note.start # Timestamp note on
events[i, 2:4] = [1, 0] # One hot vector for note on
events[i + 1, 0] = note.end # Timestamp note off
events[i + 1, 2:4] = [0, 1] # One hot vector for note off
i += 2
# Compute timestamp deltas
events = events[events[:, 0].argsort()] # Sort by column 0
events[1:, 0] = np.diff(events[:, 0])
events[0, 0] = 0 # Something suitable for the first entry
events[:, 0] = np.maximum(events[:, 0], 0) # Don't allow negative time deltas (happens at file borders)
Y = events[:, 4].astype(np.float32)
return events[:, :4].astype(np.float32), Y
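# Illustrative example (not in the original source): a single right-hand note
# (hand index 0, per the comment above) at pitch 60 from t = 1.0 s to t = 1.5 s
# contributes two rows before the delta-time step, roughly
#   [1.0, 60/127, 1, 0] with label 0.0   (note on)
#   [1.5, 60/127, 0, 1] with label 0.0   (note off)
# After sorting and differencing, column 0 holds the time since the previous
# event instead of the absolute timestamp.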
def convert_magenta(midi):
num_notes = 0
for instrument in midi.instruments:
num_notes += len(instrument.notes)
# Generate empty numpy array
events = np.zeros((2 * num_notes, 128 + 128 + 100 + 32 + 2))
# Format:[ 0-127 , 128-255, 256-355, 356-387, 388, 389 ]
# [note on, note off, time-shift, velocity, hand, absolute time]
i = 0
for hand, instrument in enumerate(midi.instruments):
notes = instrument.notes
for note in notes:
velocity_0_to_32 = note.velocity // 4
events[i, note.pitch] = 1 # Note on
events[i, -1] = note.start # Timestamp note on
events[i + 1, note.pitch + 128] = 1 # Note off
events[i + 1, -1] = note.end # Timestamp note off
events[i:i + 2, 356 + velocity_0_to_32] = 1 # Set velocity
events[i:i + 2, -2] = hand
i += 2
events = events[events[:, -1].argsort()] # Sort by timestamp (last column)
delta_time = np.diff(events[:, -1])
delta_time = np.clip(delta_time, 0.01, 10.0)
delta_time = (delta_time - 0.01) / (10.0 - 0.01) * 99.5
delta_time = delta_time.astype(np.int)
events[0, 355] = 1 # Assume ten seconds silence before first note is played
for i in range(1, len(events)):
events[i, 256 + delta_time[i - 1]] = 1
Y = events[:, -2].astype(np.float32)
return events[:, :-2].astype(np.float32), Y
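# Illustrative note (not in the original source) on the time-shift buckets
# above: a gap of 0.5 s is clipped to [0.01, 10], scaled to
# (0.5 - 0.01) / (10 - 0.01) * 99.5 ~= 4.88 and truncated to bucket 4, so
# column 256 + 4 of the following event's one-hot vector is set to 1.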
class HanndsDataset(Dataset):
"""Provides the Hannds dataset.
Args:
midi_files: list of MIDI files to load
subdir: subdir under preprocessed where npz files can be found,
e.g. 'event', 'windowed', 'windowed_tanh'
len_sequence: produced sequences are len_sequence long.
len_sequence == -1 produces single max length sequence
debug: load minimal data for faster debugging
"""
XY = namedtuple('XY', ['X', 'Y'])
def __init__(self, midi_dir, midi_files, subdir, len_sequence, debug):
self.len_sequence = len_sequence
npz_files = hannds_files.npz_files_for_midi(midi_dir, midi_files, subdir)
if debug:
load_all = [np.load(npz_file) for npz_file in npz_files[:2]]
else:
load_all = [np.load(npz_file) for npz_file in npz_files]
X = np.concatenate([item['X'] for item in load_all], axis=0)
Y = np.concatenate([item['Y'] for item in load_all], axis=0)
self.data = self.XY(X, Y)
def __len__(self):
if self.len_sequence == -1:
return 1
else:
return self.data.X.shape[0] // self.len_sequence - 1
def __getitem__(self, idx):
if self.len_sequence == -1:
return self.data.X, self.data.Y
else:
start = idx * self.len_sequence
end = start + self.len_sequence
res1 = self.data.X[start: end]
res2 = self.data.Y[start: end]
assert res1.shape[0] == res2.shape[0] == self.len_sequence
return res1, res2
def len_features(self):
return self.data.X.shape[1]
def num_categories(self):
return np.max(self.data.Y) + 1
class ContinuationSampler(Sampler):
def __init__(self, len_dataset, batch_size):
Sampler.__init__(self, None)
self.len_dataset = len_dataset
self.batch_size = batch_size
def __iter__(self):
return iter(self._generate_indices())
def __len__(self):
num_batches = self.len_dataset // self.batch_size
return num_batches * self.batch_size
def _generate_indices(self):
num_batches = step = self.len_dataset // self.batch_size
for i in range(num_batches):
index = i
for j in range(self.batch_size):
yield index
index += step
return
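# Illustrative example (not part of the original class): with len_dataset = 6
# and batch_size = 2, step = num_batches = 3 and the yielded order is
# 0, 3, 1, 4, 2, 5. A DataLoader with batch_size = 2 then produces the batches
# [0, 3], [1, 4], [2, 5], so slot j of consecutive batches walks through
# consecutive chunks and recurrent state can be carried across batches.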
def main():
module_directory = os.path.dirname(os.path.abspath(__file__))
hannds_dir = os.path.join(module_directory, 'data-hannds')
print('Making magenta')
make_npz_files(overwrite=True, midi_dir=hannds_dir, subdir='magenta', convert_func=convert_magenta)
print('Making windowed')
make_npz_files(overwrite=True, midi_dir=hannds_dir, subdir='windowed', convert_func=convert_windowed)
print()
print('Making windowed_tanh')
make_npz_files(overwrite=True, midi_dir=hannds_dir, subdir='windowed_tanh', convert_func=convert_windowed_tanh)
print()
print('Making event')
make_npz_files(overwrite=True, midi_dir=hannds_dir, subdir='event', convert_func=convert_event)
print()
f = hannds_files.TrainValidTestFiles(hannds_dir)
f.get_partition(1)
data = HanndsDataset(hannds_dir, f.train_files, 'windowed', 100, debug=False)
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
batchX, batchY = data[0]
batch_size = 50
continuity = ContinuationSampler(len(data), batch_size)
loader = DataLoader(data, batch_size, sampler=continuity)
for idx, (X_batch, Y_batch) in enumerate(loader):
X = X_batch[8]
Y = Y_batch[8]
img = np.full((X.shape[0] + 2, X.shape[1]), -0.2)
img[:-2] = X
img[-1] = Y[-1, :] - 1.0
plt.imshow(img, cmap='bwr', origin='lower', vmin=-1, vmax=1)
plt.show()
if idx == 5:
break
if __name__ == '__main__':
main()
| mit |
pv/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`RandomForestClassifier`, and `GaussianNB`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` count 5 times as much as
those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
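# Sketch of the soft-vote arithmetic (added note, not in the original example):
# with weights [1, 1, 5] the VotingClassifier's probability for sample 0 and
# class 0 should equal
#   (1 * probas[0][0, 0] + 1 * probas[1][0, 0] + 5 * probas[2][0, 0]) / (1 + 1 + 5)
# up to floating point error, which is what probas[3][0, 0] reports.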
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'RandomForestClassifier\nweight 1',
'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/scipy/signal/spectral.py | 2 | 13369 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, 1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
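# (Illustrative note, not in the original source: for nfft = 8 the packed
# layout is [y0, Re y1, Im y1, Re y2, Im y2, Re y3, Im y3, y4], so
# xft[1:-1:2] picks the real parts and xft[2::2] the imaginary parts of
# the interior bins, while xft[0] and xft[-1] are the purely real DC and
# Nyquist bins.)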
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
| gpl-3.0 |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
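# (Illustrative note, not in the original source: for p = [1, a, b, c], i.e.
# x**3 + a*x**2 + b*x + c, the companion matrix built above is
#   [[-a, -b, -c],
#    [ 1,  0,  0],
#    [ 0,  1,  0]]
# and its eigenvalues are the roots of p.)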
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
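# Hedged usage sketch (not part of the original module): checks that the
# quotient and remainder returned by polydiv reassemble the dividend via
# q*v + r == u.  The helper name below is illustrative only.
def _polydiv_roundtrip_demo():
    u = NX.array([3.0, 5.0, 2.0])   # 3x**2 + 5x + 2
    v = NX.array([2.0, 1.0])        # 2x + 1
    q, r = polydiv(u, v)
    return NX.allclose(polyadd(polymul(q, v), r), u)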
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
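# Hedged illustration (not part of the original module): _raise_power rewrites
# inline exponents such as "x**2" into the two-line layout used by
# poly1d.__str__, lifting each power onto the line above its term.
def _raise_power_demo():
    return _raise_power("1 x**2 + 2 x + 3")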
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| artistic-2.0 |
HeraclesHX/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
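# Hedged illustration (not part of the original solution): the vectorizer above
# works on character n-grams of length 1 to 3, so a word such as "langue"
# contributes features like 'l', 'la', 'lan', 'a', 'an', 'ang', ...
# Uncomment to inspect (the names below are illustrative assumptions):
#demo_vec = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False)
#demo_vec.fit([u'langue'])
#print(sorted(demo_vec.get_feature_names()))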
| bsd-3-clause |
hmendozap/auto-sklearn | test/test_pipeline/implementations/test_ProjLogit.py | 1 | 1277 | import unittest
import os
import numpy as np
#import scipy.io
from autosklearn.pipeline.implementations.ProjLogit import ProjLogit
class TestProjLogit(unittest.TestCase):
def test_sparse_filtering(self):
"""Test logistic regression implementation based on least squares"""
# simple test that should work out
trainx = np.random.rand(100,3)
        trainy = np.zeros(100)
testx = np.random.rand(100,3)
testy = np.zeros(100)
for i in range(100):
if trainx[i, 2] > 0.5:
trainy[i] = 1
for i in range(100):
if testx[i, 2] > 0.5:
testy[i] = 1
model = ProjLogit(max_epochs = 10, verbose = True)
model.fit(trainx, trainy)
print("weights 0:")
print(model.w0)
predicted_prob = model.predict_proba(testx)
predicted2 = np.argmax(predicted_prob, axis = 1)
predicted = model.predict(testx)
#print(predicted)
#print(testy)
#print((predicted != testy).sum())
#print((predicted2 != testy).sum())
self.assertTrue((predicted == predicted2).all())
self.assertTrue(((1 - predicted_prob.sum(axis=1)) < 1e-3).all())
self.assertTrue((predicted != testy).sum() < 20)
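# Hedged convenience addition (not in the original test file): allows running
# this test module directly, e.g. `python test_ProjLogit.py`.
if __name__ == '__main__':
    unittest.main()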
| bsd-3-clause |
noxer-org/noxer | noxer/rnn.py | 1 | 10485 | """
Learning with networks that can process sequential data.
"""
from sklearn.base import ClassifierMixin, RegressorMixin, BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, FunctionTransformer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from abc import abstractmethod
import tempfile
import numpy as np
# Keras preprocessing - making it picklable
# The function is run only when keras is necessary
def make_keras_picklable():
import keras.models
cls = keras.models.Model
if hasattr(cls, "is_now_picklable"):
return
cls.is_now_picklable = True
def __getstate__(self):
model_str = ""
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = { 'model_str': model_str }
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
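# Hedged usage sketch (illustrative only, not part of the original module):
# make_keras_picklable() patches keras.models.Model once per process, after
# which any fitted estimator defined below can round-trip through pickle, e.g.
#   import pickle
#   clf = RNNClassifier(n_neurons=16, max_iter=4)
#   clf.fit(X_train, y_train)    # X_train shape: (n_samples, seq_len, n_features)
#   clf2 = pickle.loads(pickle.dumps(clf))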
class KerasNNBase(BaseEstimator):
"""
Recurrent neural networks using keras as a backend.
Parameters
----------
n_neurons : [int, default=32]
Width of the neural network.
    lr : [float, default=1e-4]
Learning rate used in the optimizer for the network.
beta1 : [float, default=0.9]
beta_1 parameter of the Adam optimization algorithm.
beta2 : [float, default=0.99]
beta_2 parameter of the Adam optimization algorithm.
"""
def __init__(self, n_neurons=32, n_layers=1, lr=1e-4, beta1=0.9, beta2=0.99,
batch_size=128, max_iter=128, max_patience=1e10, val_fraction=0.2):
self.n_neurons = n_neurons
self.n_layers = n_layers
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.batch_size = batch_size
self.max_iter = max_iter
self.val_fraction = val_fraction
self.max_patience = max_patience
self.model_ = None # future keras model
def fit(self, X, y):
"""
Fit RNN model.
Parameters
----------
X : array of array of sequences [n_samples, seq_length, n_features]
y : numpy array of shape [n_samples]
Target classes. Can be string, int etc.
Returns
-------
self : returns an instance of self.
"""
from keras.optimizers import Adam
from copy import deepcopy
make_keras_picklable()
optimizer = Adam(
lr=self.lr,
beta_1=self.beta1,
beta_2=self.beta2
)
self._make_model(X, y, optimizer)
y = self.encoder.transform(y)
# split data into training and validation parts
#X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=(1.0 - self.val_fraction))
X_train = X
y_train = y
best_loss_ = 100000000000.0
patience = self.max_patience
max_iter = self.max_iter
best_model_ = [np.copy(w) for w in self.model.get_weights()]
while patience > 0 and max_iter > 0:
max_iter -= 1
val_loss = self.model.fit(X_train,y_train, epochs=1, batch_size=self.batch_size, verbose=0)
val_loss = val_loss.history['loss'][-1]
#val_loss = self.model.evaluate(X_val, y_val, verbose=0)
if np.isnan(val_loss) or np.isinf(val_loss):
break
best_model_ = [np.copy(w) for w in self.model.get_weights()]
max_iter -= 1
"""
if val_loss < best_loss_:
best_loss_ = val_loss
patience = self.max_patience
else:
patience -= 1
"""
self.model.set_weights(best_model_)
return self
def _predict(self, X):
raise NotImplementedError("Abstract method not implemented!")
def predict(self, X):
return self.encoder.inverse_transform(self._predict(X))
class KerasClassifierBase(KerasNNBase, ClassifierMixin):
@abstractmethod
def create_architecture(self, X, n_classes):
"""
Generates the architecture of nn to be trained.
"""
def _make_model(self, X, y, optimizer):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers import Activation
from keras.optimizers import Adam
n_classes = len(np.unique(y))
self.encoder = LabelEncoder()
self.encoder.fit(y)
y = self.encoder.transform(y)
try:
model = self.create_architecture(X, n_classes)
except BaseException as ex:
ip = Input(shape=X[0].shape)
x = ip
x = Flatten()(x)
x = Dense(n_classes, activation='tanh')(x)
x = Activation('sigmoid')(x)
print('Infeasible!')
print(ex)
model = keras.models.Model(inputs=ip, outputs=x)
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy'
)
self.model = model
def predict_proba(self, X):
make_keras_picklable()
return self.model.predict(X)
def _predict(self, X):
yp = self.predict_proba(X)
return np.argmax(yp, axis=1)
class RNNClassifier(KerasClassifierBase):
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, GRU, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = GRU(self.n_neurons, return_sequences=True)(x)
x = Flatten()(x)
x = Dense(n_classes, activation='softmax')(x)
return keras.models.Model(inputs=ip, outputs=x)
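# Hedged usage sketch (not part of the original module): fits the GRU-based
# classifier on a tiny synthetic sequence task.  Shapes and hyperparameters
# are illustrative assumptions only.
def _rnn_classifier_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(64, 10, 3)                   # 64 sequences, length 10, 3 features
    y = (X.sum(axis=(1, 2)) > 0).astype(int)   # label derived from the sequence sum
    clf = RNNClassifier(n_neurons=8, max_iter=2, batch_size=16)
    clf.fit(X, y)
    return clf.predict(X[:5])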
class CNN1DClassifier(KerasClassifierBase):
def __init__(self, conv_sz=3, stride=1, n_neurons=32, n_layers=1, lr=1e-4, beta1=0.9, beta2=0.99,
batch_size=128, max_iter=128, max_patience=32, val_fraction=0.2):
super(CNN1DClassifier, self).__init__(
n_neurons=n_neurons, n_layers=n_layers, lr=lr, beta1=beta1, beta2=beta2,
batch_size=batch_size, max_iter=max_iter, max_patience=max_patience, val_fraction=val_fraction
)
self.conv_sz = conv_sz
self.stride = stride
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Conv1D(filters=self.n_neurons, kernel_size=self.conv_sz,
strides=self.stride, padding='same')(x)
x = LeakyReLU(0.05)(x)
x = Flatten()(x)
x = Dense(n_classes, activation='softmax')(x)
return keras.models.Model(inputs=ip, outputs=x)
class DNNClassifier(KerasClassifierBase):
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
x = Flatten()(x)
for i in range(self.n_layers):
x = Dense(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(n_classes, activation='softmax')(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class KerasRegressorBase(KerasNNBase, RegressorMixin):
@abstractmethod
def create_architecture(self, X):
"""
Creates architecture of regressor.
"""
def _make_model(self, X, y, optimizer):
import keras.models
from keras.layers import Input, Dense, GRU
from keras.optimizers import Adam
self.encoder = FunctionTransformer(func=lambda x: x, inverse_func=lambda x: x)
try:
            model = self.create_architecture(X)
except BaseException as ex:
ip = Input(shape=X[0].shape)
x = ip
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
model.compile(
optimizer=optimizer,
            loss='mean_squared_error'
)
self.model = model
def _predict(self, X):
return self.model.predict(X)
class RNNRegressor(KerasRegressorBase):
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense, GRU
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = GRU(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class CNN1DRegressor(KerasRegressorBase):
def __init__(self, conv_sz, stride, *args, **kwargs):
super(CNN1DRegressor, self).__init__(
*args, **kwargs
)
self.conv_sz = conv_sz
self.stride = stride
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Conv1D(self.n_neurons, self.conv_sz, self.stride, padding='same')(x)
x = LeakyReLU(0.05)(x)
x = Flatten()(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class DNNRegressor(KerasRegressorBase):
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Dense(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model | mit |
zfrenchee/pandas | pandas/io/parquet.py | 1 | 8906 | """ parquet compat """
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
from pandas.compat import string_types
from pandas.core.common import AbstractMethodError
from pandas.io.common import get_filepath_or_buffer
def get_engine(engine):
""" return our implementation """
if engine == 'auto':
engine = get_option('io.parquet.engine')
if engine == 'auto':
# try engines in this order
try:
return PyArrowImpl()
except ImportError:
pass
try:
return FastParquetImpl()
except ImportError:
pass
raise ImportError("Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"pyarrow or fastparquet is required for parquet "
"support")
if engine not in ['pyarrow', 'fastparquet']:
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
if engine == 'pyarrow':
return PyArrowImpl()
elif engine == 'fastparquet':
return FastParquetImpl()
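# Hedged illustration (not part of the pandas source): resolving the engine
# explicitly and via 'auto' should yield the same implementation class when
# pyarrow is installed; the helper name is an assumption for demonstration.
def _engine_demo():
    auto_impl = get_engine('auto')        # honours the io.parquet.engine option
    pyarrow_impl = get_engine('pyarrow')
    return type(auto_impl) is type(pyarrow_impl)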
class BaseImpl(object):
api = None # module
@staticmethod
def validate_dataframe(df):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {'string', 'unicode'}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, string_types)
for name in df.index.names
if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df, path, compression, **kwargs):
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of pyarrow
# we need to import on first use
try:
import pyarrow
import pyarrow.parquet
except ImportError:
raise ImportError(
"pyarrow is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
if LooseVersion(pyarrow.__version__) < '0.4.1':
raise ImportError(
"pyarrow >= 0.4.1 is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
self._pyarrow_lt_060 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0'))
self._pyarrow_lt_070 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'))
self.api = pyarrow
def write(self, df, path, compression='snappy',
coerce_timestamps='ms', **kwargs):
self.validate_dataframe(df)
if self._pyarrow_lt_070:
self._validate_write_lt_070(df)
path, _, _ = get_filepath_or_buffer(path)
if self._pyarrow_lt_060:
table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
self.api.parquet.write_table(
table, path, compression=compression, **kwargs)
else:
table = self.api.Table.from_pandas(df)
self.api.parquet.write_table(
table, path, compression=compression,
coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
if self._pyarrow_lt_070:
return self.api.parquet.read_pandas(path, columns=columns,
**kwargs).to_pandas()
kwargs['use_pandas_metadata'] = True
return self.api.parquet.read_table(path, columns=columns,
**kwargs).to_pandas()
def _validate_write_lt_070(self, df):
# Compatibility shim for pyarrow < 0.7.0
# TODO: Remove in pandas 0.23.0
from pandas.core.indexes.multi import MultiIndex
if isinstance(df.index, MultiIndex):
msg = (
"Multi-index DataFrames are only supported "
"with pyarrow >= 0.7.0"
)
raise ValueError(msg)
# Validate index
if not isinstance(df.index, Int64Index):
msg = (
"pyarrow < 0.7.0 does not support serializing {} for the "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
raise ValueError(msg.format(type(df.index)))
if not df.index.equals(RangeIndex(len(df))):
raise ValueError(
"pyarrow < 0.7.0 does not support serializing a non-default "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
if df.index.name is not None:
raise ValueError(
"pyarrow < 0.7.0 does not serialize indexes with a name; you "
"can set the index.name to None or install the latest version "
"of pyarrow or fastparquet."
)
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
try:
import fastparquet
except ImportError:
raise ImportError(
"fastparquet is required for parquet support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
if LooseVersion(fastparquet.__version__) < '0.1.0':
raise ImportError(
"fastparquet >= 0.1.0 is required for parquet "
"support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
self.api = fastparquet
def write(self, df, path, compression='snappy', **kwargs):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
path, _, _ = get_filepath_or_buffer(path)
with catch_warnings(record=True):
self.api.write(path, df,
compression=compression, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
parquet_file = self.api.ParquetFile(path)
return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet library to use for writing. If 'auto', then the option
        'io.parquet.engine' is used; its default is to try 'pyarrow' first,
        falling back to 'fastparquet' if 'pyarrow' is unavailable.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
"""
impl = get_engine(engine)
return impl.write(df, path, compression=compression, **kwargs)
def read_parquet(path, engine='auto', columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
.. versionadded 0.21.0
Parameters
----------
path : string
File path
columns: list, default=None
If not None, only these columns will be read from the file.
.. versionadded 0.21.1
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet reader library to use. If 'auto', then the option
        'io.parquet.engine' is used; its default is to try 'pyarrow' first,
        falling back to 'fastparquet' if 'pyarrow' is unavailable.
kwargs are passed to the engine
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(path, columns=columns, **kwargs)
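# Hedged usage sketch (not part of the pandas source): a minimal round trip
# through the two module-level helpers above.  The file name is an
# illustrative assumption.
def _parquet_roundtrip_demo(path='_demo.parquet'):
    df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
    to_parquet(df, path, engine='auto', compression='snappy')
    return read_parquet(path, engine='auto', columns=['a'])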
| bsd-3-clause |
wavelets/scipy_2015_sklearn_tutorial | notebooks/figures/plot_digits_datasets.py | 19 | 2750 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
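# Hedged convenience addition (not in the original figure helper): lets the
# module be run directly as a script as well as imported.
if __name__ == '__main__':
    digits_plot()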
| cc0-1.0 |
eliasrg/SURF2017 | code/measurements.py | 1 | 5037 | # Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License
from itertools import islice
from types import SimpleNamespace
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import pickle
class Measurement:
def __init__(self, params):
self.params = params
self.x = []
self.w = []
self.v = []
self.noise = []
self.LQG = []
if params.scheme in ['noisy_lloyd_max', 'separate']:
# Quantization index translated into bits
self.bits = []
# Entire history believed by the decoder (at each step)
self.decoded_bits_history = []
self.correctly_decoded = []
def record(self, sim):
self.x.append(sim.plant.x)
self.w.append(sim.plant.w)
self.v.append(sim.plant.v)
self.noise.append(sim.channel.last_noise)
self.LQG.append(sim.LQG.evaluate(sim.t))
self.channel_average_power = sim.channel.average_power()
if hasattr(self, 'bits'):
self.bits = sim.encoder.get_bits_history()
self.decoded_bits_history.append(list(
sim.decoder.stack_decoder.first_nodes[-1].input_history()))
self.correctly_decoded.append(
all((word == history_word).all()
for word, history_word in \
zip(self.bits, self.decoded_bits_history[-1])))
print("Correctly decoded: {}".format(self.correctly_decoded[-1]))
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
measurement = pickle.load(f)
assert isinstance(measurement, Measurement)
return measurement
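    # Hedged usage sketch (illustrative only, not part of the original class):
    #   m = Measurement(params)
    #   m.record(sim)                     # call once per simulation step
    #   m.save('run1.pkl')
    #   m2 = Measurement.load('run1.pkl')
    #   Measurement.average([m, m2]).plot(label='averaged LQG cost')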
@staticmethod
def average(measurements):
new = Measurement(measurements[0].params)
def average_sequence(sequences):
sequences = [np.array(sequence).flatten() for sequence in sequences]
slices = list(zip(*sequences))
return np.array(list(map(np.mean, slices)))
new.x = average_sequence(m.x for m in measurements)
new.w = average_sequence(m.w for m in measurements)
new.v = average_sequence(m.v for m in measurements)
new.noise = average_sequence(m.noise for m in measurements)
new.LQG = average_sequence(m.LQG for m in measurements)
return new
def get_noise_record(self):
noise = SimpleNamespace()
noise.x1 = self.x[0]
noise.w_sequence = self.w[:]
noise.v_sequence = self.v[:]
noise.n_sequence = list(np.array(self.noise).flatten())
return noise
def plot(self, label=None):
self.plot_setup()
self.plot_LQG(label=label)
self.plot_bounds()
if hasattr(self, 'correctly_decoded'):
self.plot_correctly_decoded()
plt.legend()
def plot_setup(self, label="Time [steps]"):
plt.xlabel(label)
plt.grid()
def plot_x(self):
plt.plot(list(range(len(self.x))), self.x)
plt.ylabel("Plant state")
def plot_LQG(self, label=None, *args, **kwargs):
plt.plot(list(range(len(self.LQG))), 10 * np.log10(self.LQG),
label=label, *args, **kwargs)
plt.ylabel(r"$\bar{J}_t$ [dB]")
def plot_bounds(self, lower_label="Theoretical average lower bound",
upper_label="Theoretical prediction",
lower_args=['--'], lower_kwargs={},
upper_args=['--'], upper_kwargs={}):
params = self.params
# Upper bound
if params.analog and hasattr(params, 'SDR0'):
plt.plot((1, len(self.LQG)),
10 * np.log10(params.LQR_inf_upper_bound()) * np.ones(2),
*upper_args, label=upper_label, **upper_kwargs)
# Lower bound
if params.analog:
plt.plot((1, len(self.LQG)),
10 * np.log10(params.LQR_inf_lower_bound()) * np.ones(2),
*lower_args, label=lower_label, **lower_kwargs)
def plot_correctly_decoded(self, y=0):
RECTANGLE_HEIGHT = 0.8
# Find intervals of consecutive Trues
intervals = []
start = None
for (t, good) in enumerate(self.correctly_decoded, 1):
if not start and not good:
start = t
elif start and (good or t == len(self.correctly_decoded)):
intervals.append((start, t))
start = None
for i, (start, stop) in enumerate(intervals):
print("({}, {})".format(start, stop))
plt.gca().add_patch(
patches.Rectangle(
(start, y - RECTANGLE_HEIGHT/2),
stop - start,
RECTANGLE_HEIGHT,
label="Decoding errors" if i == 0 else None,
color='purple'
)
)
| mit |
DonBeo/statsmodels | statsmodels/base/tests/test_generic_methods.py | 25 | 16558 | # -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: Josef Perktold
"""
from statsmodels.compat.python import range
import numpy as np
import statsmodels.api as sm
from statsmodels.compat.scipy import NumpyVersion
from numpy.testing import assert_, assert_allclose, assert_equal
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = NumpyVersion(np.__version__) < '1.5.0'
winoldnp = iswin & npversionless15
class CheckGenericMixin(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
def setup_class(self):
nobs = 500
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
self.exog = x
self.xf = 0.25 * np.ones((2, 4))
def test_ttest_tvalues(self):
# test that t_test has same results a params, bse, tvalues, ...
res = self.results
mat = np.eye(len(res.params))
tt = res.t_test(mat)
assert_allclose(tt.effect, res.params, rtol=1e-12)
# TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)
# test params table frame returned by t_test
table_res = np.column_stack((res.params, res.bse, res.tvalues,
res.pvalues, res.conf_int()))
table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
tt.conf_int()))
table2 = tt.summary_frame().values
assert_allclose(table2, table_res, rtol=1e-12)
# move this to test_attributes ?
assert_(hasattr(res, 'use_t'))
tt = res.t_test(mat[0])
tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
        # automatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def test_fitted(self):
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
raise SkipTest
res = self.results
fitted = res.fittedvalues
assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
assert_allclose(fitted, res.predict(), rtol=1e-12)
def test_predict_types(self):
res = self.results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
# SMOKE test only TODO
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
rtol=1e-12)
# only one prediction:
assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
rtol=1e-12)
assert_allclose(fitted[:1], res.predict(p_exog[0]),
rtol=1e-12)
# predict doesn't preserve DataFrame, e.g. dot converts to ndarray
# import pandas
# predicted = res.predict(pandas.DataFrame(p_exog))
# assert_(isinstance(predicted, pandas.DataFrame))
# assert_allclose(predicted, fitted, rtol=1e-12)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
class TestGenericOLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestGenericOLSOneExog(CheckGenericMixin):
# check with single regressor (no constant)
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog[:, 1]
np.random.seed(987689)
y = x + np.random.randn(x.shape[0])
self.results = sm.OLS(y, x).fit()
class TestGenericWLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestGenericPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestGenericNegativeBinomial(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
start_params = np.array([-0.0565406 , -0.21213599, 0.08783076,
-0.02991835, 0.22901974, 0.0621026,
0.06799283, 0.08406688, 0.18530969,
1.36645452])
self.results = mod.fit(start_params=start_params, disp=0)
class TestGenericLogit(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestGenericRLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestGenericGLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestGenericGEEPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params)
class TestGenericGEEPoissonNaive(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params,
cov_type='naive')
class TestGenericGEEPoissonBC(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# params_est = np.array([-0.0063238 , 0.99463752, 1.02790201, 0.98080081])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
mod = sm.GEE(y_count, self.exog, groups, family=family, cov_struct=vi)
self.results = mod.fit(start_params=start_params,
cov_type='bias_reduced')
# Other test classes
class CheckAnovaMixin(object):
@classmethod
def setup_class(cls):
import statsmodels.stats.tests.test_anova as ttmod
test = ttmod.TestAnova3()
test.setupClass()
cls.data = test.data.drop([0,1,2])
cls.initialize()
def test_combined(self):
res = self.res
wa = res.wald_test_terms(skip_single=False, combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_const = eye[0]
c_w = eye[[2,3]]
c_d = eye[1]
c_dw = eye[[4,5]]
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_const, c_d, c_w, c_dw, c_duration, c_weight])
def test_categories(self):
# test only multicolumn terms
res = self.res
wa = res.wald_test_terms(skip_single=True)
eye = np.eye(len(res.params))
c_w = eye[[2,3]]
c_dw = eye[[4,5]]
compare_waldres(res, wa, [c_w, c_dw])
def compare_waldres(res, wa, contrasts):
    for i, c in enumerate(contrasts):
wt = res.wald_test(c)
assert_allclose(wa.table.values[i, 0], wt.statistic)
assert_allclose(wa.table.values[i, 1], wt.pvalue)
df = c.shape[0] if c.ndim == 2 else 1
assert_equal(wa.table.values[i, 2], df)
# attributes
assert_allclose(wa.statistic[i], wt.statistic)
assert_allclose(wa.pvalues[i], wt.pvalue)
assert_equal(wa.df_constraints[i], df)
if res.use_t:
assert_equal(wa.df_denom[i], res.df_resid)
col_names = wa.col_names
if res.use_t:
assert_equal(wa.distribution, 'F')
assert_equal(col_names[0], 'F')
assert_equal(col_names[1], 'P>F')
else:
assert_equal(wa.distribution, 'chi2')
assert_equal(col_names[0], 'chi2')
assert_equal(col_names[1], 'P>chi2')
# SMOKETEST
wa.summary_frame()
class TestWaldAnovaOLS(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
def test_noformula(self):
endog = self.res.model.endog
exog = self.res.model.data.orig_exog
del exog.design_info
res = sm.OLS(endog, exog).fit()
wa = res.wald_test_terms(skip_single=True,
combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_duration, c_weight])
class TestWaldAnovaOLSF(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
class TestWaldAnovaGLM(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = glm("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
class TestWaldAnovaPoisson(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import Poisson
mod = Poisson.from_formula("Days ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(cov_type='HC0')
class TestWaldAnovaNegBin(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb2')
cls.res = mod.fit()
class TestWaldAnovaNegBin1(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb1')
cls.res = mod.fit(cov_type='HC0')
class T_estWaldAnovaOLSNoFormula(object):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
if __name__ == '__main__':
pass
| bsd-3-clause |
ChanderG/numpy | numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
    Higher orders can be requested as well; the fourth-order derivative of
    a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
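    For instance, evaluating ``p = [3, 0, 1]`` at ``x = 5`` proceeds as
    ``(3*5 + 0)*5 + 1 = 76``, matching the first example below.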
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
ktaneishi/deepchem | contrib/dragonn/tutorial_utils.py | 6 | 10169 | from __future__ import division
import random
random.seed(1)
import inspect
from collections import namedtuple, defaultdict, OrderedDict
import numpy as np
np.random.seed(1)
from sklearn.model_selection import train_test_split
#from simdna import simulations
import simulations
from simdna.synthetic import StringEmbeddable
from utils import get_motif_scores, one_hot_encode
from models import SequenceDNN
from dragonn.plot import add_letters_to_axis, plot_motif
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def SequenceDNN_learning_curve(dnn):
if dnn.valid_metrics is not None:
train_losses, valid_losses = [
np.array([epoch_metrics['Loss']
for epoch_metrics in metrics])
for metrics in (dnn.train_metrics, dnn.valid_metrics)
]
min_loss_indx = min(enumerate(valid_losses), key=lambda x: x[1])[0]
f = plt.figure(figsize=(10, 4))
ax = f.add_subplot(1, 1, 1)
ax.plot(range(len(train_losses)), train_losses, 'b', label='Training', lw=4)
ax.plot(
range(len(train_losses)), valid_losses, 'r', label='Validation', lw=4)
ax.plot([min_loss_indx, min_loss_indx], [0, 1.0], 'k--', label='Early Stop')
ax.legend(loc="upper right")
ax.set_ylabel("Loss")
ax.set_ylim((0.0, 1.0))
ax.set_xlabel("Epoch")
plt.show()
else:
print("learning curve can only be obtained after training!")
def test_SequenceDNN(dnn, simulation_data):
print("Test performance:")
print(dnn.test(simulation_data.X_test, simulation_data.y_test))
def plot_motifs(simulation_data):
for motif_name in simulation_data.motif_names:
plot_motif(motif_name, figsize=(10, 4), ylab=motif_name)
def plot_sequence_filters(dnn):
fig = plt.figure(figsize=(15, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
conv_filters = dnn.get_sequence_filters()
num_plots_per_axis = int(len(conv_filters)**0.5) + 1
for i, conv_filter in enumerate(conv_filters):
ax = fig.add_subplot(num_plots_per_axis, num_plots_per_axis, i + 1)
add_letters_to_axis(ax, conv_filter.T)
ax.axis("off")
ax.set_title("Filter %s" % (str(i + 1)))
def plot_SequenceDNN_layer_outputs(dnn, simulation_data):
# define layer out functions
import theano
get_conv_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[0].get_output(train=False),
allow_input_downcast=True)
get_conv_relu_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[1].get_output(train=False),
allow_input_downcast=True)
get_maxpool_output = theano.function(
[dnn.model.layers[0].input],
dnn.model.layers[-4].get_output(train=False),
allow_input_downcast=True)
# get layer outputs for a positive simulation example
pos_indx = np.where(simulation_data.y_valid == 1)[0][0]
pos_X = simulation_data.X_valid[pos_indx:(pos_indx + 1)]
conv_outputs = get_conv_output(pos_X).squeeze()
conv_relu_outputs = get_conv_relu_output(pos_X).squeeze()
maxpool_outputs = get_maxpool_output(pos_X).squeeze()
# plot layer outputs
fig = plt.figure(figsize=(15, 12))
ax1 = fig.add_subplot(3, 1, 3)
heatmap = ax1.imshow(
conv_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax1.set_ylabel("Convolutional Filters")
ax1.set_xlabel("Position")
ax1.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.set_title("SequenceDNN outputs from convolutional layer.\t\
Locations of motif sites are highlighted in grey.")
ax2 = fig.add_subplot(3, 1, 2)
heatmap = ax2.imshow(
conv_relu_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax2.set_ylabel("Convolutional Filters")
ax2.get_yaxis().set_ticks([])
ax2.get_xaxis().set_ticks([])
ax2.set_title("Convolutional outputs after ReLU transformation.\t\
Locations of motif sites are highlighted in grey.")
ax3 = fig.add_subplot(3, 1, 1)
heatmap = ax3.imshow(
maxpool_outputs, aspect='auto', interpolation='None', cmap='seismic')
fig.colorbar(heatmap)
ax3.set_title("DNN outputs after max pooling")
ax3.set_ylabel("Convolutional Filters")
ax3.get_yaxis().set_ticks([])
ax3.get_xaxis().set_ticks([])
# highlight motif sites
motif_scores = get_motif_scores(pos_X, simulation_data.motif_names)
motif_sites = [np.argmax(motif_scores[0, i, :]) for i in [0, 1]]
for motif_site in motif_sites:
conv_output_start = motif_site - max(dnn.conv_width - 10, 0)
conv_output_stop = motif_site + max(dnn.conv_width - 10, 0)
ax1.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
ax2.axvspan(conv_output_start, conv_output_stop, color='grey', alpha=0.5)
def interpret_SequenceDNN_filters(dnn, simulation_data):
print("Plotting simulation motifs...")
plot_motifs(simulation_data)
plt.show()
print("Visualizing convolutional sequence filters in SequenceDNN...")
plot_sequence_filters(dnn)
plt.show()
def interpret_data_with_SequenceDNN(dnn, simulation_data):
# get a positive and a negative example from the simulation data
pos_indx = np.flatnonzero(simulation_data.y_valid == 1)[2]
neg_indx = np.flatnonzero(simulation_data.y_valid == 0)[2]
pos_X = simulation_data.X_valid[pos_indx:pos_indx + 1]
neg_X = simulation_data.X_valid[neg_indx:neg_indx + 1]
# get motif scores, ISM scores, and DeepLIFT scores
scores_dict = defaultdict(OrderedDict)
scores_dict['Positive']['Motif Scores'] = get_motif_scores(
pos_X, simulation_data.motif_names)
scores_dict['Positive']['ISM Scores'] = dnn.in_silico_mutagenesis(pos_X).max(
axis=-2)
scores_dict['Positive']['DeepLIFT Scores'] = dnn.deeplift(pos_X).max(axis=-2)
scores_dict['Negative']['Motif Scores'] = get_motif_scores(
neg_X, simulation_data.motif_names)
scores_dict['Negative']['ISM Scores'] = dnn.in_silico_mutagenesis(neg_X).max(
axis=-2)
scores_dict['Negative']['DeepLIFT Scores'] = dnn.deeplift(neg_X).max(axis=-2)
# get motif site locations
motif_sites = {
key: [
embedded_motif.startPos + len(embedded_motif.what.string) // 2
for embedded_motif in (next(
embedded_motif
for embedded_motif in simulation_data.valid_embeddings[index]
if isinstance(embedded_motif.what, StringEmbeddable) and
motif_name in embedded_motif.what.stringDescription)
for motif_name in simulation_data.motif_names)
]
for key, index in (('Positive', pos_indx), ('Negative', neg_indx))
}
# organize legends
motif_label_dict = {}
motif_label_dict['Motif Scores'] = simulation_data.motif_names
if len(simulation_data.motif_names) == dnn.num_tasks:
motif_label_dict['ISM Scores'] = simulation_data.motif_names
else:
motif_label_dict['ISM Scores'] = ['_'.join(simulation_data.motif_names)]
motif_label_dict['DeepLIFT Scores'] = motif_label_dict['ISM Scores']
# plot scores and highlight motif site locations
seq_length = pos_X.shape[-1]
plots_per_row = 2
plots_per_column = 3
ylim_dict = {
'Motif Scores': (-80, 30),
'ISM Scores': (-1.5, 3.0),
'DeepLIFT Scores': (-1.5, 3.0)
}
motif_colors = ['b', 'r', 'c', 'm', 'g', 'k', 'y']
font_size = 12
num_x_ticks = 5
highlight_width = 5
motif_labels_cache = []
f = plt.figure(figsize=(10, 12))
f.subplots_adjust(hspace=0.15, wspace=0.15)
f.set_tight_layout(True)
for j, key in enumerate(['Positive', 'Negative']):
for i, (score_type, scores) in enumerate(scores_dict[key].items()):
ax = f.add_subplot(plots_per_column, plots_per_row,
plots_per_row * i + j + 1)
ax.set_ylim(ylim_dict[score_type])
ax.set_xlim((0, seq_length))
ax.set_frame_on(False)
if j == 0: # put y axis and ticks only on left side
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=2))
ax.get_yaxis().tick_left()
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_ylabel(score_type)
if j > 0: # remove y axes
ax.get_yaxis().set_visible(False)
if i < (plots_per_column - 1): # remove x axes
ax.get_xaxis().set_visible(False)
if i == (plots_per_column - 1): # set x axis and ticks on bottom
ax.set_xticks(seq_length / num_x_ticks * (np.arange(num_x_ticks + 1)))
xmin, xmax = ax.get_xaxis().get_view_interval()
ymin, ymax = ax.get_yaxis().get_view_interval()
ax.add_artist(
Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2))
ax.get_xaxis().tick_bottom()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size / 1.5)
ax.set_xlabel("Position")
if j > 0 and i < (plots_per_column - 1): # remove all axes
ax.axis('off')
add_legend = False
for _i, motif_label in enumerate(motif_label_dict[score_type]):
if score_type == 'Motif Scores':
scores_to_plot = scores[0, _i, :]
else:
scores_to_plot = scores[0, 0, 0, :]
if motif_label not in motif_labels_cache:
motif_labels_cache.append(motif_label)
add_legend = True
motif_color = motif_colors[motif_labels_cache.index(motif_label)]
ax.plot(scores_to_plot, label=motif_label, c=motif_color)
if add_legend:
leg = ax.legend(
loc=[0, 0.85],
frameon=False,
fontsize=font_size,
ncol=3,
handlelength=-0.5)
for legobj in leg.legendHandles:
legobj.set_color('w')
for _j, text in enumerate(leg.get_texts()):
text_color = motif_colors[motif_labels_cache.index(
motif_label_dict[score_type][_j])]
text.set_color(text_color)
for motif_site in motif_sites[key]:
ax.axvspan(
motif_site - highlight_width,
motif_site + highlight_width,
color='grey',
alpha=0.1)
| mit |
antoinecarme/pyaf | tests/perf/test_perf1.py | 1 | 1445 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
# get_ipython().magic('matplotlib inline')
# %load_ext memory_profiler
lValues = [ 64 ];
for cyc in lValues:
print("TEST_CYCLES_START", cyc)
b1 = tsds.generate_random_TS(N = 4800 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = cyc, transform = "None", sigma = 0.0, exog_count = 100, ar_order=0);
df = b1.mPastData
# df.tail(10)
# df[:-10].tail()
# df[:-10:-1]
# df.describe()
lEngine = autof.cForecastEngine()
lEngine.mOptions.mCycleLengths = [ k for k in range(2,128) ];
lEngine
H = b1.mHorizon[b1.mSignalVar];
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
print("TEST_CYCLES_END", cyc)
| bsd-3-clause |
jorik041/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are only based
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
larsoner/mne-python | tutorials/preprocessing/plot_60_maxwell_filtering_sss.py | 8 | 16845 | """
.. _tut-artifact-sss:
Signal-space separation (SSS) and Maxwell filtering
===================================================
This tutorial covers reducing environmental noise and compensating for head
movement with SSS and Maxwell filtering.
.. contents:: Page contents
:local:
:depth: 2
As usual we'll start by importing the modules we need, loading some
:ref:`example data <sample-dataset>`, and cropping it to save on memory:
"""
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import mne
from mne.preprocessing import find_bad_channels_maxwell
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60)
###############################################################################
# Background on SSS and Maxwell filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Signal-space separation (SSS) :footcite:`TauluKajola2005,TauluSimola2006`
# is a technique based on the physics
# of electromagnetic fields. SSS separates the measured signal into components
# attributable to sources *inside* the measurement volume of the sensor array
# (the *internal components*), and components attributable to sources *outside*
# the measurement volume (the *external components*). The internal and external
# components are linearly independent, so it is possible to simply discard the
# external components to reduce environmental noise. *Maxwell filtering* is a
# related procedure that omits the higher-order components of the internal
# subspace, which are dominated by sensor noise. Typically, Maxwell filtering
# and SSS are performed together (in MNE-Python they are implemented together
# in a single function).
#
# Like :ref:`SSP <tut-artifact-ssp>`, SSS is a form of projection. Whereas SSP
# empirically determines a noise subspace based on data (empty-room recordings,
# EOG or ECG activity, etc) and projects the measurements onto a subspace
# orthogonal to the noise, SSS mathematically constructs the external and
# internal subspaces from `spherical harmonics`_ and reconstructs the sensor
# signals using only the internal subspace (i.e., does an oblique projection).
#
# .. warning::
#
# Maxwell filtering was originally developed for Elekta Neuromag® systems,
# and should be considered *experimental* for non-Neuromag data. See the
# Notes section of the :func:`~mne.preprocessing.maxwell_filter` docstring
# for details.
#
# The MNE-Python implementation of SSS / Maxwell filtering currently provides
# the following features:
#
# - Basic bad channel detection
# (:func:`~mne.preprocessing.find_bad_channels_maxwell`)
# - Bad channel reconstruction
# - Cross-talk cancellation
# - Fine calibration correction
# - tSSS
# - Coordinate frame translation
# - Regularization of internal components using information theory
# - Raw movement compensation (using head positions estimated by MaxFilter)
# - cHPI subtraction (see :func:`mne.chpi.filter_chpi`)
# - Handling of 3D (in addition to 1D) fine calibration files
# - Epoch-based movement compensation as described in
# :footcite:`TauluKajola2005` through :func:`mne.epochs.average_movements`
# - **Experimental** processing of data from (un-compensated) non-Elekta
# systems
#
#
# Using SSS and Maxwell filtering in MNE-Python
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# For optimal use of SSS with data from Elekta Neuromag® systems, you should
# provide the path to the fine calibration file (which encodes site-specific
# information about sensor orientation and calibration) as well as a crosstalk
# compensation file (which reduces interference between Elekta's co-located
# magnetometer and paired gradiometer sensor units).
fine_cal_file = os.path.join(sample_data_folder, 'SSS', 'sss_cal_mgh.dat')
crosstalk_file = os.path.join(sample_data_folder, 'SSS', 'ct_sparse_mgh.fif')
###############################################################################
# Before we perform SSS we'll look for bad channels — ``MEG 2443`` is quite
# noisy.
#
# .. warning::
#
# It is critical to mark bad channels in ``raw.info['bads']`` *before*
# calling :func:`~mne.preprocessing.maxwell_filter` in order to prevent
# bad channel noise from spreading.
#
# Let's see if we can automatically detect it.
raw.info['bads'] = []
raw_check = raw.copy()
auto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell(
raw_check, cross_talk=crosstalk_file, calibration=fine_cal_file,
return_scores=True, verbose=True)
print(auto_noisy_chs) # we should find them!
print(auto_flat_chs) # none for this dataset
###############################################################################
#
# .. note:: `~mne.preprocessing.find_bad_channels_maxwell` needs to operate on
# a signal without line noise or cHPI signals. By default, it simply
# applies a low-pass filter with a cutoff frequency of 40 Hz to the
# data, which should remove these artifacts. You may also specify a
# different cutoff by passing the ``h_freq`` keyword argument. If you
# set ``h_freq=None``, no filtering will be applied. This can be
# useful if your data has already been preconditioned, for example
# using :func:`mne.chpi.filter_chpi`,
# :func:`mne.io.Raw.notch_filter`, or :meth:`mne.io.Raw.filter`.
#
# Now we can update the list of bad channels in the dataset.
bads = raw.info['bads'] + auto_noisy_chs + auto_flat_chs
raw.info['bads'] = bads
###############################################################################
# We called `~mne.preprocessing.find_bad_channels_maxwell` with the optional
# keyword argument ``return_scores=True``, causing the function to return a
# dictionary of all data related to the scoring used to classify channels as
# noisy or flat. This information can be used to produce diagnostic figures.
#
# In the following, we will generate such visualizations for
# the automated detection of *noisy* gradiometer channels.
# Only select the data for gradiometer channels.
ch_type = 'grad'
ch_subset = auto_scores['ch_types'] == ch_type
ch_names = auto_scores['ch_names'][ch_subset]
scores = auto_scores['scores_noisy'][ch_subset]
limits = auto_scores['limits_noisy'][ch_subset]
bins = auto_scores['bins']  # The time windows that were evaluated.
# We will label each segment by its start and stop time, with up to 3
# digits before and 3 digits after the decimal place (1 ms precision).
bin_labels = [f'{start:3.3f} – {stop:3.3f}'
for start, stop in bins]
# We store the data in a Pandas DataFrame. The seaborn heatmap function
# we will call below will then be able to automatically assign the correct
# labels to all axes.
data_to_plot = pd.DataFrame(data=scores,
columns=pd.Index(bin_labels, name='Time (s)'),
index=pd.Index(ch_names, name='Channel'))
# First, plot the "raw" scores.
fig, ax = plt.subplots(1, 2, figsize=(12, 8))
fig.suptitle(f'Automated noisy channel detection: {ch_type}',
fontsize=16, fontweight='bold')
sns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'),
ax=ax[0])
[ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')
for x in range(1, len(bins))]
ax[0].set_title('All Scores', fontweight='bold')
# Now, adjust the color range to highlight segments that exceeded the limit.
sns.heatmap(data=data_to_plot,
vmin=np.nanmin(limits), # bads in input data have NaN limits
cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[1])
[ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray')
for x in range(1, len(bins))]
ax[1].set_title('Scores > Limit', fontweight='bold')
# The figure title should not overlap with the subplots.
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
###############################################################################
#
# .. note:: You can use the very same code as above to produce figures for
# *flat* channel detection. Simply replace the word "noisy" with
# "flat", and replace ``vmin=np.nanmin(limits)`` with
# ``vmax=np.nanmax(limits)``.
#
# You can see the un-altered scores for each channel and time segment in the
# left subplots, and thresholded scores – those which exceeded a certain limit
# of noisiness – in the right subplots. While the right subplot is entirely
# white for the magnetometers, we can see a horizontal line extending all the
# way from left to right for the gradiometers. This line corresponds to channel
# ``MEG 2443``, which was reported as auto-detected noisy channel in the step
# above. But we can also see another channel exceeding the limits, apparently
# in a more transient fashion. It was therefore *not* detected as bad, because
# the number of segments in which it exceeded the limits was less than 5,
# which MNE-Python uses by default.
#
# .. note:: You can request a different number of segments that must be
# found to be problematic before
# `~mne.preprocessing.find_bad_channels_maxwell` reports them as bad.
# To do this, pass the keyword argument ``min_count`` to the
# function.
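#
# For instance, a hypothetical lower threshold (``min_count=3``) could be
# requested as follows (a sketch only, not executed in this tutorial)::
#
#     find_bad_channels_maxwell(raw_check, cross_talk=crosstalk_file,
#                               calibration=fine_cal_file,
#                               return_scores=True, min_count=3)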
###############################################################################
# Obviously, this algorithm is not perfect. Specifically, on closer inspection
# of the raw data after looking at the diagnostic plots above, it becomes clear
# that the channel which exceeded the "noise" limits in some segments without
# qualifying as "bad" does in fact contain some flux jumps. There were just not
# *enough* flux jumps in the recording for our automated procedure to report
# the channel as bad. So it can still be useful to manually inspect and mark
# bad channels. The channel in question is ``MEG 2313``. Let's mark it as bad:
raw.info['bads'] += ['MEG 2313'] # from manual inspection
###############################################################################
# After that, performing SSS and Maxwell filtering is done with a
# single call to :func:`~mne.preprocessing.maxwell_filter`, with the crosstalk
# and fine calibration filenames provided (if available):
raw_sss = mne.preprocessing.maxwell_filter(
raw, cross_talk=crosstalk_file, calibration=fine_cal_file, verbose=True)
###############################################################################
# To see the effect, we can plot the data before and after SSS / Maxwell
# filtering.
raw.pick(['meg']).plot(duration=2, butterfly=True)
raw_sss.pick(['meg']).plot(duration=2, butterfly=True)
###############################################################################
# Notice that channels marked as "bad" have been effectively repaired by SSS,
# eliminating the need to perform :ref:`interpolation <tut-bad-channels>`.
# The heartbeat artifact has also been substantially reduced.
#
# The :func:`~mne.preprocessing.maxwell_filter` function has parameters
# ``int_order`` and ``ext_order`` for setting the order of the spherical
# harmonic expansion of the interior and exterior components; the default
# values are appropriate for most use cases. Additional parameters include
# ``coord_frame`` and ``origin`` for controlling the coordinate frame ("head"
# or "meg") and the origin of the sphere; the defaults are appropriate for most
# studies that include digitization of the scalp surface / electrodes. See the
# documentation of :func:`~mne.preprocessing.maxwell_filter` for details.
#
#
# Spatiotemporal SSS (tSSS)
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# An assumption of SSS is that the measurement volume (the spherical shell
# where the sensors are physically located) is free of electromagnetic sources.
# The thickness of this source-free measurement shell should be 4-8 cm for SSS
# to perform optimally. In practice, there may be sources falling within that
# measurement volume; these can often be mitigated by using Spatiotemporal
# Signal Space Separation (tSSS) :footcite:`TauluSimola2006`.
# tSSS works by looking for temporal
# correlation between components of the internal and external subspaces, and
# projecting out any components that are common to the internal and external
# subspaces. The projection is done in an analogous way to
# :ref:`SSP <tut-artifact-ssp>`, except that the noise vector is computed
# across time points instead of across sensors.
#
# To use tSSS in MNE-Python, pass a time (in seconds) to the parameter
# ``st_duration`` of :func:`~mne.preprocessing.maxwell_filter`. This will
# determine the "chunk duration" over which to compute the temporal projection.
# The chunk duration effectively acts as a high-pass filter with a cutoff
# frequency of :math:`\frac{1}{\mathtt{st\_duration}}~\mathrm{Hz}`; this
# effective high-pass has an important consequence:
#
# - In general, larger values of ``st_duration`` are better (provided that your
# computer has sufficient memory) because larger values of ``st_duration``
# will have a smaller effect on the signal.
#
# If the chunk duration does not evenly divide your data length, the final
# (shorter) chunk will be added to the prior chunk before filtering, leading
# to slightly different effective filtering for the combined chunk (the
# effective cutoff frequency differing at most by a factor of 2). If you need
# to ensure identical processing of all analyzed chunks, either:
#
# - choose a chunk duration that evenly divides your data length (only
# recommended if analyzing a single subject or run), or
#
# - include at least ``2 * st_duration`` of post-experiment recording time at
# the end of the :class:`~mne.io.Raw` object, so that the data you intend to
# further analyze is guaranteed not to be in the final or penultimate chunks.
#
# Additional parameters affecting tSSS include ``st_correlation`` (to set the
# correlation value above which correlated internal and external components
# will be projected out) and ``st_only`` (to apply only the temporal projection
# without also performing SSS and Maxwell filtering). See the docstring of
# :func:`~mne.preprocessing.maxwell_filter` for details.
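# As a minimal sketch (not part of the original tutorial code; it assumes the
# cropped ``raw`` loaded above), tSSS with a 10-second chunk duration would be
# requested like this:
raw_tsss = mne.preprocessing.maxwell_filter(raw, st_duration=10.)
###############################################################################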
#
#
# Movement compensation
# ^^^^^^^^^^^^^^^^^^^^^
#
# If you have information about subject head position relative to the sensors
# (i.e., continuous head position indicator coils, or :term:`cHPI <HPI>`), SSS
# can take that into account when projecting sensor data onto the internal
# subspace. Head position data can be computed using
# :func:`mne.chpi.compute_chpi_locs` and :func:`mne.chpi.compute_head_pos`,
# or loaded with the :func:`mne.chpi.read_head_pos` function. The
# :ref:`example data <sample-dataset>` doesn't include cHPI, so here we'll
# load a :file:`.pos` file used for testing, just to demonstrate:
head_pos_file = os.path.join(mne.datasets.testing.data_path(), 'SSS',
'test_move_anon_raw.pos')
head_pos = mne.chpi.read_head_pos(head_pos_file)
mne.viz.plot_head_positions(head_pos, mode='traces')
###############################################################################
# The cHPI data file could also be passed as the ``head_pos`` parameter of
# :func:`~mne.preprocessing.maxwell_filter`. Not only would this account for
# movement within a given recording session, but also would effectively
# normalize head position across different measurement sessions and subjects.
# See :ref:`here <example-movement-comp>` for an extended example of applying
# movement compensation during Maxwell filtering / SSS. Another option is to
# apply movement compensation when averaging epochs into an
# :class:`~mne.Evoked` instance, using the :func:`mne.epochs.average_movements`
# function.
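# As a hedged sketch (the ``test_move_anon_raw.fif`` file name is an assumption
# about the testing dataset layout; the sample recording has no cHPI, so the
# recording that matches the head positions loaded above is used instead):
raw_move = mne.io.read_raw_fif(
    os.path.join(mne.datasets.testing.data_path(), 'SSS',
                 'test_move_anon_raw.fif'), allow_maxshield='yes')
raw_sss_mc = mne.preprocessing.maxwell_filter(raw_move, head_pos=head_pos)
###############################################################################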
#
# Each of these approaches requires time-varying estimates of head position,
# which can be obtained from MaxFilter using the ``-headpos`` and ``-hp``
# arguments (see the MaxFilter manual for details).
#
#
# Caveats to using SSS / Maxwell filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# 1. There are patents related to the Maxwell filtering algorithm, which may
# legally preclude using it in commercial applications. More details are
# provided in the documentation of
# :func:`~mne.preprocessing.maxwell_filter`.
#
# 2. SSS works best when both magnetometers and gradiometers are present, and
# is most effective when gradiometers are planar (due to the need for very
# accurate sensor geometry and fine calibration information). Thus its
# performance is dependent on the MEG system used to collect the data.
#
#
# References
# ^^^^^^^^^^
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _spherical harmonics: https://en.wikipedia.org/wiki/Spherical_harmonics
| bsd-3-clause |
cshallue/models | research/skip_thoughts/skip_thoughts/vocabulary_expansion.py | 18 | 7370 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute an expanded vocabulary of embeddings using a word2vec model.
This script loads the word embeddings from a trained skip-thoughts model and
from a trained word2vec model (typically with a larger vocabulary). It trains a
linear regression model without regularization to learn a linear mapping from
the word2vec embedding space to the skip-thoughts embedding space. The model is
then applied to all words in the word2vec vocabulary, yielding vectors in the
skip-thoughts word embedding space for the union of the two vocabularies.
The linear regression task is to learn a parameter matrix W to minimize
|| X - Y * W ||^2,
where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],
Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a
matrix of shape [dim2, dim1].
This is based on the "Translation Matrix" method from the paper:
"Exploiting Similarities among Languages for Machine Translation"
Tomas Mikolov, Quoc V. Le, Ilya Sutskever
https://arxiv.org/abs/1309.4168
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import gensim.models
import numpy as np
import sklearn.linear_model
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("skip_thoughts_model", None,
"Checkpoint file or directory containing a checkpoint "
"file.")
tf.flags.DEFINE_string("skip_thoughts_vocab", None,
"Path to vocabulary file containing a list of newline-"
"separated words where the word id is the "
"corresponding 0-based index in the file.")
tf.flags.DEFINE_string("word2vec_model", None,
"File containing a word2vec model in binary format.")
tf.flags.DEFINE_string("output_dir", None, "Output directory.")
tf.logging.set_verbosity(tf.logging.INFO)
def _load_skip_thoughts_embeddings(checkpoint_path):
"""Loads the embedding matrix from a skip-thoughts model checkpoint.
Args:
checkpoint_path: Model checkpoint file or directory containing a checkpoint
file.
Returns:
word_embedding: A numpy array of shape [vocab_size, embedding_dim].
Raises:
ValueError: If no checkpoint file matches checkpoint_path.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_file:
raise ValueError("No checkpoint file found in %s" % checkpoint_path)
else:
checkpoint_file = checkpoint_path
tf.logging.info("Loading skip-thoughts embedding matrix from %s",
checkpoint_file)
reader = tf.train.NewCheckpointReader(checkpoint_file)
word_embedding = reader.get_tensor("word_embedding")
tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s",
word_embedding.shape)
return word_embedding
def _load_vocabulary(filename):
"""Loads a vocabulary file.
Args:
filename: Path to text file containing newline-separated words.
Returns:
vocab: A dictionary mapping word to word id.
"""
tf.logging.info("Reading vocabulary from %s", filename)
vocab = collections.OrderedDict()
with tf.gfile.GFile(filename, mode="r") as f:
for i, line in enumerate(f):
word = line.decode("utf-8").strip()
assert word not in vocab, "Attempting to add word twice: %s" % word
vocab[word] = i
tf.logging.info("Read vocabulary of size %d", len(vocab))
return vocab
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
"""Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
Args:
skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
skip_thoughts_embedding_dim].
skip_thoughts_vocab: A dictionary of word to id.
word2vec: An instance of gensim.models.Word2Vec.
Returns:
combined_emb: A dictionary mapping words to embedding vectors.
"""
# Find words shared between the two vocabularies.
tf.logging.info("Finding shared words")
shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
# Select embedding vectors for shared words.
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
shared_st_emb = skip_thoughts_emb[[
skip_thoughts_vocab[w] for w in shared_words
]]
shared_w2v_emb = word2vec[shared_words]
# Train a linear regression model on the shared embedding vectors.
tf.logging.info("Training linear regression model")
model = sklearn.linear_model.LinearRegression()
model.fit(shared_w2v_emb, shared_st_emb)
# Create the expanded vocabulary.
tf.logging.info("Creating embeddings for expanded vocabuary")
combined_emb = collections.OrderedDict()
for w in word2vec.vocab:
# Ignore words with underscores (spaces).
if "_" not in w:
w_emb = model.predict(word2vec[w].reshape(1, -1))
combined_emb[w] = w_emb.reshape(-1)
for w in skip_thoughts_vocab:
combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
return combined_emb
def main(unused_argv):
if not FLAGS.skip_thoughts_model:
raise ValueError("--skip_thoughts_model is required.")
if not FLAGS.skip_thoughts_vocab:
raise ValueError("--skip_thoughts_vocab is required.")
if not FLAGS.word2vec_model:
raise ValueError("--word2vec_model is required.")
if not FLAGS.output_dir:
raise ValueError("--output_dir is required.")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load the skip-thoughts embeddings and vocabulary.
skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
# Load the Word2Vec model.
word2vec = gensim.models.Word2Vec.load_word2vec_format(
FLAGS.word2vec_model, binary=True)
# Run vocabulary expansion.
embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
word2vec)
# Save the output.
vocab = embedding_map.keys()
vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
with tf.gfile.GFile(vocab_file, "w") as f:
f.write("\n".join(vocab))
tf.logging.info("Wrote vocabulary file to %s", vocab_file)
embeddings = np.array(embedding_map.values())
embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy")
np.save(embeddings_file, embeddings)
tf.logging.info("Wrote embeddings file to %s", embeddings_file)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
poeticcapybara/pythalesians | pythalesians/graphics/graphs/lowleveladapters/adaptertemplate.py | 1 | 5178 | __author__ = 'saeedamen' # Saeed Amen / [email protected]
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import abc
from math import log10, floor
from pythalesians.util.constants import Constants
import matplotlib
class AdapterTemplate:
def init(self):
return
@abc.abstractmethod
def plot_2d_graph(self, data_frame, gp, type):
return
def get_bar_indices(self, data_frame, gp, chart_type, bar_ind):
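        # Determine whether any requested chart type is a bar/stacked chart
        # and, if so, switch the x-data from the DataFrame index to the
        # precomputed bar indices.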
has_bar = False
xd = data_frame.index
no_of_bars = len(data_frame.columns)
if gp.chart_type is not None:
if isinstance(gp.chart_type, list):
if 'bar' in gp.chart_type:
xd = bar_ind
no_of_bars = gp.chart_type.count('bar')
has_bar = True
elif 'stacked' in gp.chart_type:
xd = bar_ind
no_of_bars = 1
has_bar = True
elif 'bar' == gp.chart_type:
xd = bar_ind
has_bar = True
elif 'stacked' == gp.chart_type:
xd = bar_ind
has_bar = True
else:
if chart_type == 'bar' or chart_type == 'stacked':
xd = bar_ind
has_bar = True
return xd, bar_ind, has_bar, no_of_bars
def assign(self, structure, field, default):
if hasattr(structure, field): default = getattr(structure, field)
return default
def assign_list(self, gp, field, list):
if hasattr(gp, field):
list = [str(x) for x in getattr(gp, field)]
return list
def get_linewidth(self, label, linewidth_1, linewidth_2, linewidth_2_series):
if label in linewidth_2_series:
return linewidth_2
return linewidth_1
def round_to_1(self, x):
return round(x, -int(floor(log10(x))))
def create_color_list(self, gp, data_frame):
        # get all the correct colors (and construct gradients if necessary e.g. from 'blues')
color = self.construct_color(gp, 'color', len(data_frame.columns.values) - len(gp.color_2_series))
color_2 = self.construct_color(gp, 'color_2', len(gp.color_2_series))
return self.assign_color(data_frame.columns, color, color_2,
gp.exclude_from_color, gp.color_2_series)
def construct_color(self, gp, color_field_name, no_of_entries):
color = []
if hasattr(gp, color_field_name):
if isinstance(getattr(gp, color_field_name), list):
color = getattr(gp, color_field_name, color)
else:
try:
color = self.create_colormap(no_of_entries, getattr(gp, color_field_name))
except: pass
return color
def exclude_from_color(self, gp):
if not(isinstance(gp.exclude_from_color, list)):
gp.exclude_from_color = [gp.exclude_from_color]
exclude_from_color = [str(x) for x in gp.exclude_from_color]
return exclude_from_color
def assign_color(self, labels, color, color_2, exclude_from_color,
color_2_series):
color_list = []
axis_1_color_index = 0; axis_2_color_index = 0
# convert all the labels to strings
labels = [str(x) for x in labels]
# go through each label
for label in labels:
color_spec = None
if label in exclude_from_color:
color_spec = None
elif label in color_2_series:
if color_2 != []:
color_spec = self.get_color_code(color_2[axis_2_color_index])
axis_2_color_index = axis_2_color_index + 1
else:
if color != []:
color_spec = self.get_color_code(color[axis_1_color_index])
axis_1_color_index = axis_1_color_index + 1
try:
color_spec = matplotlib.colors.colorConverter.to_rgba(color_spec)
except:
pass
color_list.append(color_spec)
return color_list
def get_color_code(self, code):
# redefine color names
dict = Constants().plotfactory_color_overwrites
if code in dict: return dict[code]
return code
def create_colormap(self, num_colors, map_name):
## matplotlib ref for colors: http://matplotlib.org/examples/color/colormaps_reference.html
cm = matplotlib.cm.get_cmap(name = map_name)
return [cm(1.*i/num_colors) for i in range(num_colors)] | apache-2.0 |
Windy-Ground/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without sample weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
MichaelSheely/RegionPredictionFromTemperature | main.py | 1 | 4256 | import pandas as pd
import numpy as np
import os.path
from geopy.geocoders import Nominatim
from geopy.point import Point
from geopy.exc import GeocoderTimedOut
from geopy.exc import GeocoderServiceError
TEMPERATURES_FILE = 'data/USCityTemperaturesAfter1850.csv'
CITY_STATE_FILE = 'data/city_state.csv'
STATE_REGION_FILE = 'data/StatesAndRegions.csv'
FINAL_TEMPERATURES_FILE = 'data/labeled_data.csv' # TODO: Change all relevant files names to this constant.
def add_state(path=TEMPERATURES_FILE):
temp = pd.read_csv(path, header=0)
only_unique = temp.sort_values(['City']).drop_duplicates(subset=['City','Latitude', 'Longitude'])
if not os.path.isfile('data/States.csv'):
states = create_state_column(only_unique)
else:
states = pd.read_csv('data/States.csv', header=0)
joined_table = pd.merge(temp, states, how='left', on=['City', 'Latitude', 'Longitude'])
joined_table.to_csv('data/Temp.csv', index=False)
def create_state_column(us):
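    # Reverse-geocode each unique (city, latitude, longitude) row with
    # Nominatim to fill a 'State' column, with hard-coded fallbacks for the
    # locations known to time out or to lack a 'state' field.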
geolocator = Nominatim()
us = us.assign(State=np.nan)
lat_ndx = us.columns.get_loc('Latitude')
lon_ndx = us.columns.get_loc('Longitude')
state_ndx = us.columns.get_loc('State')
city_ndx = us.columns.get_loc('City')
def find_state(row):
lat = row[lat_ndx]
lon = row[lon_ndx]
city = row[city_ndx]
# print city,lat, lon
coord = Point(lat + ' ' + lon)
try:
location = geolocator.reverse(coord)
except (GeocoderTimedOut, GeocoderServiceError) as e:
if city == 'Chicago':
state = 'Illinois'
elif city == 'Milwaukee':
state = 'Wisconsin'
else:
print "location %s at %s %s timed out" %(city, lat, lon)
state = np.nan
return state
# print location.raw
if location.raw['address']['country_code'] != 'us':
print "ERRRRRROOORRRRR"
return
try:
state = location.raw['address']['state']
except KeyError as e:
if lat == '32.95N' and lon == '117.77W':
state = 'California'
elif city in ['Anaheim', 'Chula Vista', 'San Diego']:
state = 'California'
elif city == 'Brownsville':
state = 'Texas'
else:
print "location %s at %s %s keyed out" %(city, lat, lon)
state= np.nan
return state
state = us.apply(find_state, axis=1)
us = us.assign(State = state)
print state
    us[['City', 'Latitude', 'Longitude', 'State']].to_csv('data/States.csv', index=False)
return us
def filter_and_save_data(path='data/GlobalLandTemperaturesbyCity.csv', ignore_before=1850):
out = pd.read_csv(path, header=0)
us = out.loc[out['Country'] == 'United States']
    # lexicographical comparison of strings makes this work for any 4-digit year
    us = us[us['dt'] > str(ignore_before)]
# delete the old index and Country code
us.drop('Country', axis=1, inplace=True)
us.reset_index(drop=True, inplace=True)
us.to_csv(TEMPERATURES_FILE, index=False)
return us
def merge_data(to_merge='data/Temp.csv', new_file='data/joined.csv'):
state_region = pd.read_csv(STATE_REGION_FILE, header=0)
temperatures = pd.read_csv(to_merge, header=0)
joined_table = pd.merge(temperatures, state_region, how='left', on='State')
joined_table.to_csv(new_file, index=False)
def city_country(raw_file='data/RawUSData.csv'):
out = pd.read_csv(raw_file)
keep = ['Name', 'Canonical Name']
us = out[keep]
us = us.assign(State = us['Canonical Name'].apply(get_state))
us = us.rename(columns={'Name':'City'})
us = us[['City', 'State']]
us.to_csv(CITY_STATE_FILE, index=False)
def get_state(raw_string):
return raw_string.split(',')[-2]
def main():
if not os.path.isfile(TEMPERATURES_FILE): # TODO: add force make file
filter_and_save_data()
data = pd.read_csv(TEMPERATURES_FILE)
if not os.path.isfile(CITY_STATE_FILE):
city_country()
cities = pd.read_csv(CITY_STATE_FILE)
add_state()
merge_data(to_merge='data/States.csv', new_file='data/CityRegions.csv')
merge_data()
if __name__ == "__main__":
main()
| mit |
ViennaRNA/forgi | test/forgi/threedee/utilities/vector_test.py | 1 | 24259 | from __future__ import absolute_import
from __future__ import division
from builtins import range
import unittest
import math
import numpy as np
import numpy.testing as nptest
import forgi.threedee.utilities.vector as ftuv
REPEAT_TESTS_CONTAINING_RANDOM = 10
class TestLineSegmentDistance(unittest.TestCase):
def setUp(self):
pass
def test_line_segment_distance_parallel(self):
a0 = np.array([0., 0., 1.])
a1 = np.array([0., 0., 10.])
b0 = np.array([0., 0., 11.])
b1 = np.array([0., 0., 20.])
self.assertAlmostEqual(ftuv.vec_distance(
*ftuv.line_segment_distance(a0, a1, b0, b1)), 1.)
def test_line_segment_distance_zero(self):
a0 = np.array([0., 0., 1.])
a1 = np.array([0., 0., 10.])
b0 = np.array([0., -10., 5.])
b1 = np.array([0., 10., 5.])
self.assertAlmostEqual(ftuv.vec_distance(
*ftuv.line_segment_distance(a0, a1, b0, b1)), 0.)
def test_line_segment_distance_point_to_line(self):
a0 = np.array([0., 0., 1.])
a1 = np.array([0., 0., 10.])
b0 = np.array([0., -10., 12.])
b1 = np.array([0., 10., 12.])
self.assertAlmostEqual(ftuv.vec_distance(
*ftuv.line_segment_distance(a0, a1, b0, b1)), 2.)
def test_line_segment_distance_windschief(self):
a0 = np.array([0., 0., -10.])
a1 = np.array([0., 0., 10.])
b0 = np.array([5.2, -10., 5.])
b1 = np.array([5.2, 10., 5.])
self.assertAlmostEqual(ftuv.vec_distance(
*ftuv.line_segment_distance(a0, a1, b0, b1)), 5.2)
def test_line_segment_distance_real_world(self):
a0 = np.array([0., 0., 1.])
a1 = np.array([-2.76245752, -6.86976093, 7.54094508])
b0 = np.array([-27.57744115, 6.96488989, -22.47619655])
b1 = np.array([-16.93424799, -4.0631445, -16.19822301])
self.assertLess(ftuv.vec_distance(
*ftuv.line_segment_distance(a0, a1, b0, b1)), 25)
class TestLineSegmentCollinearity(unittest.TestCase):
def test_collinear_segments(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, 1]), np.array([0, 0, 2.]))
self.assertAlmostEqual(ftuv.line_segments_collinearity(a, b), 1)
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, -1]), np.array([0, 0, -2.]))
self.assertAlmostEqual(ftuv.line_segments_collinearity(a, b), 1)
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, -2]), np.array([0, 0, -1.]))
self.assertAlmostEqual(ftuv.line_segments_collinearity(a, b), 1)
a = (np.array([0., 0, 0]), np.array([1, 1, 1]))
b = (np.array([2., 2, 2]), np.array([3, 3, 3.]))
self.assertAlmostEqual(ftuv.line_segments_collinearity(a, b), 1)
a = (np.array([0., 0, 0]), np.array([1, 1, 1]))
b = (np.array([0.5, 0.5, 0.5]), np.array([0.7, 0.7, 0.7]))
self.assertAlmostEqual(ftuv.line_segments_collinearity(a, b), 1)
def test_fixed_angle(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, 1]), np.array(
[0., 0, 1]) + ftuv.normalize([1, 1, 1.]))
x = np.linspace(0.01, 4, 500)
for f in x:
col = ftuv.line_segments_collinearity(a, (b[0] * f, b[1] * f))
self.assertLess(col, 0.95)
self.assertGreater(col, 0.6)
def test_normal_angle(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, 1]), np.array([0., 1, 1]))
x = np.linspace(0.01, 4, 500)
for f in x:
col = ftuv.line_segments_collinearity(a, (b[0] * f, b[1] * f))
self.assertLess(col, 0.6)
self.assertGreater(col, 0.)
def plot_fixed_angle(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, 1]), np.array(
[0., 0, 1]) + ftuv.normalize([1, 1, 1.]))
x = np.linspace(0.01, 4, 5000)
y = []
for f in x:
y.append(ftuv.line_segments_collinearity(a, (b[0] * f, b[1] * f)))
import matplotlib.pyplot as plt
plt.title("Fixed angle")
plt.plot(x, y)
plt.show()
assert False
def plot_normal(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
b = (np.array([0., 0, 1]), np.array([0., 1, 1]))
x = np.linspace(0.01, 4, 5000)
y = []
for f in x:
y.append(ftuv.line_segments_collinearity(a, (b[0] * f, b[1] * f)))
import matplotlib.pyplot as plt
plt.title("normal")
plt.plot(x, y)
plt.show()
assert False
def test_distance(self):
# score decreases with increasing distance, but stays above 0
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
x = np.linspace(0.01, 10, 5000)
y_old = 1
for d in x:
b0 = a[1] + np.array([0, 1., 0.]) * d
b = b0, b0 + np.array([0., 0, 1])
y = ftuv.line_segments_collinearity(a, b)
self.assertLess(y, y_old)
self.assertGreater(y, 0)
y_old = y
def plot_distance(self):
a = (np.array([0., 0, 0]), np.array([0, 0, 1]))
x = np.linspace(0.01, 10, 5000)
y = []
for d in x:
b0 = a[1] + np.array([0, 1., 0.]) * d
b = b0, b0 + np.array([0., 0, 1])
y.append(ftuv.line_segments_collinearity(a, b))
import matplotlib.pyplot as plt
plt.title("distance")
plt.plot(x, y)
plt.show()
class TestRotationMatrix(unittest.TestCase):
def setUp(self):
pass
def test_around_xyz(self):
vec = np.array([1., 2., 3.])
nptest.assert_allclose(np.dot(ftuv.rotation_matrix(
"x", math.radians(90)), vec), [1., 3., -2.])
nptest.assert_allclose(np.dot(ftuv.rotation_matrix(
"y", math.radians(90)), vec), [-3., 2., 1.])
nptest.assert_allclose(np.dot(ftuv.rotation_matrix(
"z", math.radians(90)), vec), [2., -1., 3.])
def test_shortcut_works(self):
nptest.assert_allclose(ftuv.rotation_matrix(
"x", 1.3), ftuv.rotation_matrix(ftuv.standard_basis[0], 1.3))
nptest.assert_allclose(ftuv.rotation_matrix(
"y", -2.3), ftuv.rotation_matrix(ftuv.standard_basis[1], -2.3))
nptest.assert_allclose(ftuv.rotation_matrix(
"z", 0.23), ftuv.rotation_matrix(ftuv.standard_basis[2], 0.23))
def test_list_or_array_works(self):
nptest.assert_allclose(ftuv.rotation_matrix(
[2, 3., 4.], 1.43), ftuv.rotation_matrix(np.array([2, 3., 4.]), 1.43))
class TestVector(unittest.TestCase):
"""Tests for the threedee.utilities.vector module"""
def setUp(self):
return
def test_closest_point_on_seg(self):
self.assertEqual(
tuple(ftuv.closest_point_on_seg((0, 1), (0, 3), (2, 2))), (0, 2))
self.assertEqual(
tuple(ftuv.closest_point_on_seg((1, 0), (3, 0), (2, 2))), (2, 0))
# Not parallel to axis: Floating point values...
self.assertAlmostEqual(ftuv.closest_point_on_seg(
(0, 0), (2, 2), (0, 2))[0], 1.) # x-coordinate
self.assertAlmostEqual(ftuv.closest_point_on_seg(
            (0, 0), (2, 2), (0, 2))[1], 1.) # y-coordinate
# Outside segment: returns one endpoint of the segment.
self.assertEqual(
tuple(ftuv.closest_point_on_seg((0, 1), (0, 3), (2, 4))), (0, 3))
self.assertEqual(tuple(ftuv.closest_point_on_seg(
(0, 1), (0, 3), (-2, 0))), (0, 1))
def test_get_inter_distances(self):
vecs = [np.array([1., 0., 0.]), np.array([0., 0., 0.]),
np.array([0., 0., 0.]), np.array([-1., 0., 0.])]
distances = ftuv.get_inter_distances(vecs)
self.assertEqual(sorted(distances), [0, 1, 1, 1, 1, 2])
def test_get_random_vector(self):
for _ in range(REPEAT_TESTS_CONTAINING_RANDOM):
vec = ftuv.get_random_vector()
self.assertLessEqual(ftuv.magnitude(vec[0]), 1.)
vec1 = ftuv.get_random_vector()
vec2 = ftuv.get_random_vector()
self.assertTrue(all(vec1[j] != vec2[j] for j in [0, 1, 2]),
msg="Repeated calls should generate different results."
"This tests depends on random values. if it fails, try running it again.")
def test_get_alignment_matrix(self):
vec1 = np.array([0.5, 0.7, 0.9])
vec2 = np.array([0.345, 3.4347, 0.55])
rotMat = ftuv.get_alignment_matrix(vec1, vec2)
self.assertTrue(ftuv.is_almost_parallel(vec2, np.dot(vec1, rotMat)))
self.assertTrue(ftuv.is_almost_parallel(np.dot(rotMat, vec2), vec1))
def test_get_double_alignment_matrix(self):
vec1 = np.array([0.5, 0.7, 0.9])
vec1b = np.array([9., 0, -5.])
vec2 = np.array([0.345, 3.5, 0.55])
vec2b = np.array([0., 0.55, -3.5])
rotMat = ftuv.get_double_alignment_matrix((vec1, vec1b), (vec2, vec2b))
self.assertTrue(ftuv.is_almost_parallel(vec2, np.dot(vec1, rotMat)))
self.assertTrue(ftuv.is_almost_parallel(np.dot(rotMat, vec2), vec1))
self.assertTrue(ftuv.is_almost_parallel(vec2b, np.dot(vec1b, rotMat)),
msg="{} not colinear with {}".format(vec2b, np.dot(vec1b, rotMat)))
self.assertTrue(ftuv.is_almost_parallel(np.dot(rotMat, vec2b), vec1b),
msg="{} not colinear with {}".format(np.dot(rotMat, vec2b), vec1b))
def test_get_orthogonal_unit_vector(self):
vecs = [np.array([1., 0., 0.]), np.array([2.7, 5.6, 8.2]), np.array(
[11., -40., 0.]), np.array([-1., 0., 0.])]
for vec in vecs:
ortVec = ftuv.get_orthogonal_unit_vector(vec)
self.assertAlmostEqual(np.dot(ortVec, vec), 0, places=10)
self.assertAlmostEqual(np.linalg.norm(ortVec), 1, places=10)
# Every vector is orthogonal to the zero-vector:
vec = np.array([0., 0., 0.])
# ortVec=ftuv.get_orthogonal_unit_vector(vec)
# Currently, ortVec==nan, so the assertion fails.
#self.assertAlmostEqual(np.dot(ortVec, vec), 0, places=10)
#self.assertAlmostEqual(np.linalg.norm(ortVec), 1, places=10)
def test_seg_intersect(self):
# normal case
isec = ftuv.seg_intersect(([0., 1.], [0., -1.]), ([-1., 0.], [1., 0.]))
self.assertEqual(len(isec), 1)
np.testing.assert_allclose(isec[0], [0., 0.])
# parallel, no intersection
isec = ftuv.seg_intersect(([0., 3.], [1., 3.]), ([2., 3.], [3., 3.]))
self.assertEqual(isec, [])
# one inside other
isec = ftuv.seg_intersect(([0., 0.], [4., 4.]), ([1., 1.], [2., 2.]))
self.assertEqual(len(isec), 2)
isec = sorted(isec, key=lambda x: (x[0], x[1]))
np.testing.assert_allclose(isec[0], [1., 1.])
np.testing.assert_allclose(isec[1], [2., 2.])
isec = ftuv.seg_intersect(([1., 1.], [2., 2.]), ([0., 0.], [4., 4.]))
self.assertEqual(len(isec), 2)
isec = sorted(isec, key=lambda x: (x[0], x[1]))
np.testing.assert_allclose(isec[0], [1., 1.])
np.testing.assert_allclose(isec[1], [2., 2.])
# overlapping
isec = ftuv.seg_intersect(([0., 2.], [2., 4.]), ([1., 3.], [3., 5.]))
self.assertEqual(len(isec), 2)
isec = sorted(isec, key=lambda x: (x[0], x[1]))
np.testing.assert_allclose(isec[0], [1., 3.])
np.testing.assert_allclose(isec[1], [2., 4.])
# non-parallel, no intersection
isec = ftuv.seg_intersect(([0., 2.], [2., 4.]), ([5., 3.], [10, 5.]))
self.assertEqual(isec, [])
# shared endpoint
isec = ftuv.seg_intersect(([0., 1.], [0., 4.]), ([0., 4.], [5., 7.]))
self.assertEqual(len(isec), 1)
np.testing.assert_allclose(isec[0], [0., 4.])
isec = ftuv.seg_intersect(([0., 1.], [0., 4.]), ([0., 1.], [-5., 7.]))
self.assertEqual(len(isec), 1)
np.testing.assert_allclose(isec[0], [0., 1.])
# Invalid inputs
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 1.], [0., 4.]), ([0., 1.], [-5., 7., 5.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 1., 3.], [0., 4.]), ([0., 1.], [-5., 7.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 1.], [0., 4., 5.]), ([0., 1.], [-5., 7.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 1.], [0., 4.]), ([0., 1., 7.], [-5., 7.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 1.], [0., 4., 6.]),
([0., 1., 7.], [-5., 7., 8.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0.], [0., 4.]), ([0., 1.], [-5., 7.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0., 5.], [4.34]), ([0., 1.], [-5., 7.]))
with self.assertRaises(ValueError):
ftuv.seg_intersect(([0.3, 5.2], [0.3, 5.2]), ([0., 1.], [-5., 7.]))
def test_is_almost_parallel(self):
# Zero-vector is colinear to nothing
self.assertFalse(ftuv.is_almost_parallel(
np.array([0, 0, 0]), np.array([0., 0., 0.])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([0.4, 0, 0]), np.array([0., 0., 0.])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([0, 0, 0]), np.array([0, 20, 10])))
        # 10**-8 is treated as zero
self.assertTrue(ftuv.is_almost_parallel(
np.array([0, 1, 1]), np.array([10**-10, 2, 2])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([1, 0, 1]), np.array([2, 10**-10, 2])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([1, 1, 0]), np.array([2, 2, 10**-10])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([10**-10, 2, 2]), np.array([0, 1, 1])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([2, 10**-10, 2]), np.array([1, 0, 1])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([2, 2, 10**-10]), np.array([1, 1, 0])))
self.assertTrue(ftuv.is_almost_parallel(
[6.13714577e-16, 3.68203114, 1.66697272e-15], [0., 15.302, 0.]))
# Real world example, where 10**-9 is not working
self.assertTrue(ftuv.is_almost_parallel(
[6.22374626e+00, -6.47794687e-01, -3.29655380e-06], [7.61983623e+00, -7.93105828e-01, -4.03602575e-06]))
# Colinear
self.assertTrue(ftuv.is_almost_parallel(
np.array([0, 0, 2]), np.array([0., 0., 3.])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([3, 6, 7]), np.array([9., 18., 21.])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([3, 6, 0]), np.array([9., 18., 0.])))
self.assertTrue(ftuv.is_almost_parallel(
np.array([3, 0, 8]), np.array([9., 0., 24. + 10**-12])))
# Not colinear
self.assertFalse(ftuv.is_almost_parallel(
np.array([0, 0, 3.]), np.array([2., 0, 0])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([0, 3., 0]), np.array([0, 0, 3.])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([1, 2, 3]), np.array([2., 4., -6.])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([1, 2, 3]), np.array([3., 4., 6.])))
self.assertFalse(ftuv.is_almost_parallel(
np.array([1, 2, 3]), np.array([2., 5., 6.])))
def test_is_almost_parallel_vs_antiparallel(self):
self.assertEqual(ftuv.is_almost_parallel(
np.array([0, 0, 2]), np.array([0., 0., 3.])), 1)
self.assertTrue(ftuv.is_almost_parallel(
np.array([3, 6, 7]), np.array([-9., -18., -21.])), -1)
self.assertTrue(ftuv.is_almost_parallel(
np.array([3, 6, 0]), np.array([9., 18., 0.])), 1)
self.assertTrue(ftuv.is_almost_parallel(
np.array([-3, 0, -8]), np.array([-9., 0., -24.])), -1)
def test_middlepoint(self):
self.assertIsInstance(ftuv.middlepoint((1, 2, 3), (4, 5, 6)), tuple)
self.assertIsInstance(ftuv.middlepoint([1, 2, 3], [4, 5, 6]), list)
self.assertIsInstance(ftuv.middlepoint(
np.array([1, 2, 3]), np.array([4, 5, 6])), type(np.array([1, 2, 3])))
self.assertEqual(ftuv.middlepoint((1, 2), (3, 4)), (2, 3))
self.assertEqual(ftuv.middlepoint([1, 2, 3], [5, 6, 7]), [3, 4, 5])
mp = ftuv.middlepoint(np.array([1, 2, -3]), np.array([1, 0, -5]))
self.assertTrue(((mp == np.array([1, 1, -4])).all()), msg="Middlepoint for np arrays: {} "
"is not {}".format(mp, np.array([1, 1, -4])))
def test_create_orthonormal_basis(self):
basis1 = ftuv.create_orthonormal_basis(np.array([0.0, 0.0, 2.0]))
self.assertTrue(ftuv.is_almost_parallel(
basis1[0], np.array([0., 0., 2.])))
basis2 = ftuv.create_orthonormal_basis(
np.array([0.0, 0.0, 2.0]), np.array([0.0, 3.6, 0.]))
self.assertTrue(ftuv.is_almost_parallel(
basis2[0], np.array([0., 0., 2.])))
self.assertTrue(ftuv.is_almost_parallel(
basis2[1], np.array([0., 3.6, 0])))
basis3 = ftuv.create_orthonormal_basis(
np.array([0.0, 0.0, 2.0]), np.array([0.0, 3.6, 0.]), np.array([1., 0, 0]))
self.assertTrue(ftuv.is_almost_parallel(
basis3[0], np.array([0., 0., 2.])))
self.assertTrue(ftuv.is_almost_parallel(
basis3[1], np.array([0., 3.6, 0])))
self.assertTrue(ftuv.is_almost_parallel(
basis3[2], np.array([1., 0, 0])))
for basis in [basis1, basis2, basis3]:
self.assertAlmostEqual(np.dot(basis[0], basis[1]), 0)
self.assertAlmostEqual(np.dot(basis[0], basis[2]), 0)
self.assertAlmostEqual(np.dot(basis[2], basis[1]), 0)
for b in basis:
self.assertAlmostEqual(ftuv.magnitude(b), 1)
def test_spherical_coordinate_transforms(self):
for vec in [np.array([0, 0, 1]), np.array([0, 2, 0]), np.array([3, 0, 0]), np.array([4, 5, 0]), np.array([6, 0, 7]), np.array([8, 9, 0.4])]:
sphe = ftuv.spherical_cartesian_to_polar(vec)
nptest.assert_allclose(
ftuv.spherical_polar_to_cartesian(sphe), vec, atol=0.0000001)
nptest.assert_allclose(ftuv.spherical_polar_to_cartesian(
[1, 0, math.pi / 2]), np.array([0, 0, 1]), atol=0.0000001)
nptest.assert_allclose(ftuv.spherical_polar_to_cartesian(
[2, math.pi / 2, math.pi / 4]), np.array([1, 1, 0]) * 2 / math.sqrt(2), atol=0.0000001)
nptest.assert_allclose(ftuv.spherical_cartesian_to_polar(np.array(
[0, 2, 2]) / math.sqrt(2)), [2, math.pi / 4, math.pi / 2], atol=0.0000001)
def test_get_standard_basis(self):
nptest.assert_allclose(ftuv.get_standard_basis(2), [[1, 0], [0, 1]])
nptest.assert_allclose(ftuv.get_standard_basis(3), [
[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def test_change_basis(self):
new_v = ftuv.change_basis(np.array([1., 2., 3.]), np.array(
[[0, 1., 0], [1., 0, 0], [0, 0, 1.]]), np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]]))
nptest.assert_allclose(new_v, np.array([2., 1., 3.]))
def test_change_basis_vectorized(self):
coords = np.array([[0., 1., 2.], [1., 2., 3.],
[0., 0., 2.], [0., 1., 0.]])
basis1 = np.array([[0., 0., 1.], [0., 1., 0.], [1., 0., 1.]])
basis2 = np.array([[1., 0., 0.], [0., 1., 0.], [0., 1., 1.]])
new_coords = ftuv.change_basis_vectorized(coords, basis2, basis1)
for i in range(4):
nptest.assert_array_equal(
new_coords[i], ftuv.change_basis(coords[i], basis2, basis1))
nptest.assert_array_equal(new_coords[2], np.array([2., -2., 2.]))
def benchmark_change_basis(self):
import timeit
t1 = timeit.timeit("ftuv.change_basis_vectorized(coords, new_basis, old_basis)",
"import forgi.threedee.utilities.vector as ftuv; import numpy as np; coords = (np.random.rand(100,3)-0.5)*20; "
"new_basis=np.array([[1.,2.,0.],[0.,6.,7],[0.4,0,9.3]]);old_basis=np.array([[1.5,2.5,0],[1.5,0,7],[0,0.7,9.3]])", number=1000000)
t2 = timeit.timeit("for coord in coords: ftuv.change_basis(coord, new_basis, old_basis)",
setup="import numpy as np; coords = (np.random.rand(100,3)-0.5)*20; import forgi.threedee.utilities.vector as ftuv; "
"new_basis=np.array([[1.,2.,0.],[0.,6.,7],[0.4,0,9.3]]);old_basis=np.array([[1.5,2.5,0],[1.5,0,7],[0,0.7,9.3]])", number=1000000)
self.assertLess(int(t1) + 50, int(t2))
def test_det3x3(self):
m1 = np.array([[1., 2, 3], [4., 5, 6], [7, 8, 9]])
m2 = np.array([[1, 1, 2], [3, 3, 4.], [6, 6, 8]])
m3 = np.array([[2, -4, 6], [-2, 6., 9], [0, 0, 1]])
for m in [m1, m2, m3]:
self.assertAlmostEqual(ftuv.det3x3(m), np.linalg.det(m))
def test_get_centroid(self):
coords = [[0., 1., 1.], [1, 1, 1], [-1, 2, 3], [3, 0, 0], [-3, 1, 0]]
nptest.assert_almost_equal(
ftuv.get_vector_centroid(np.array(coords)), [0, 1, 1])
nptest.assert_almost_equal(ftuv.get_vector_centroid(coords), [0, 1, 1])
def test_center_on_centroid(self):
coords = [[0., 1., 1.], [1, 1, 1], [-1, 2, 3], [3, 0, 0], [-3, 1, 0]]
nptest.assert_almost_equal(ftuv.center_on_centroid(np.array(coords)),
[[0, 0., 0], [1, 0, 0], [-1, 1, 2], [3, -1, -1], [-3, 0, -1]])
nptest.assert_equal(ftuv.get_vector_centroid(
ftuv.center_on_centroid(coords)), [0, 0., 0])
class TestRotationMatrixSpecialCases(unittest.TestCase):
def test_special_cases_xyz(self):
nptest.assert_almost_equal(ftuv.rotation_matrix("x", 0.4),
ftuv.rotation_matrix(ftuv.standard_basis[0], 0.4))
nptest.assert_almost_equal(ftuv.rotation_matrix("y", 0.7),
ftuv.rotation_matrix(ftuv.standard_basis[1], 0.7))
nptest.assert_almost_equal(ftuv.rotation_matrix("z", 0.9),
ftuv.rotation_matrix(ftuv.standard_basis[2], 0.9))
nptest.assert_almost_equal(ftuv.rotation_matrix("x", 1.84),
ftuv.rotation_matrix(ftuv.standard_basis[0], 1.84))
nptest.assert_almost_equal(ftuv.rotation_matrix("y", 2.7),
ftuv.rotation_matrix(ftuv.standard_basis[1], 2.7))
nptest.assert_almost_equal(ftuv.rotation_matrix("z", 3.9),
ftuv.rotation_matrix(ftuv.standard_basis[2], 3.9))
class Test_PairDistanceDistribution(unittest.TestCase):
def test_pdd(self):
points = [[0, 0, 4], [0, 0, -4], [3, 0, 0]]
x, y = ftuv.pair_distance_distribution(points)
nptest.assert_array_equal(x, np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]))
nptest.assert_array_equal(y, np.array([0, 0, 0, 0, 0, 2, 0, 0, 1]))
def test_pdd_smaller_step(self):
points = [[0, 0, 4], [0, 0, -4], [3, 0, 0]]
x, y = ftuv.pair_distance_distribution(points, 0.5)
nptest.assert_array_equal(x, np.array(
[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8]))
nptest.assert_array_equal(y, np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1]))
def test_pdd_rounding_down(self):
points = [[0, 0, 2], [0, 0, 2.4]]
x, y = ftuv.pair_distance_distribution(points, 0.3)
nptest.assert_array_equal(x, np.array([0, 0.3]))
nptest.assert_array_equal(y, np.array([0, 1]))
x, y = ftuv.pair_distance_distribution(points, 0.06)
nptest.assert_array_equal(x, np.array(
[0, 0.06, 0.12, 0.18, 0.24, 0.3, 0.36]))
nptest.assert_array_equal(y, np.array(
[0, 0, 0, 0, 0, 0, 1]))
| gpl-3.0 |
chrsrds/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
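    # Straightforward O(N^3) reference implementation used to validate the
    # optimized graph_shortest_path results.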
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
implus/scipy_2015_sklearn_tutorial | notebooks/figures/plot_interactive_tree.py | 20 | 2317 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO # doctest: +SKIP
from sklearn.tree import export_graphviz
from scipy.misc import imread
from scipy import ndimage
import re
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def tree_image(tree, fout=None):
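    # Render the fitted tree via graphviz/pydot and return it as an image
    # array; if pydot is unavailable, fall back to a near-white placeholder.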
try:
import pydot
except ImportError:
# make a hacky white plot
x = np.ones((10, 10))
x[0, 0] = 0
return x
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data)
data = re.sub(r"gini = 0\.[0-9]+\\n", "", dot_data.getvalue())
data = re.sub(r"samples = [0-9]+\\n", "", data)
data = re.sub(r"\\nsamples = [0-9]+", "", data)
graph = pydot.graph_from_dot_data(data)
if fout is None:
fout = "tmp.png"
graph.write_png(fout)
return imread(fout)
def plot_tree(max_depth=1):
fig, ax = plt.subplots(1, 2, figsize=(15, 7))
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
faces = faces.reshape(xx.shape)
border = ndimage.laplace(faces) != 0
ax[0].contourf(xx, yy, Z, alpha=.4)
ax[0].scatter(xx[border], yy[border], marker='.', s=1)
ax[0].set_title("max_depth = %d" % max_depth)
ax[1].imshow(tree_image(tree))
ax[1].axis("off")
else:
ax[0].set_title("data set")
ax[1].set_visible(False)
ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax[0].set_xlim(x_min, x_max)
ax[0].set_ylim(y_min, y_max)
ax[0].set_xticks(())
ax[0].set_yticks(())
def plot_tree_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_tree, max_depth=slider)
| cc0-1.0 |
mne-tools/mne-tools.github.io | 0.18/_downloads/211ffeef048b9a95c431fe8767a130ee/plot_find_eog_artifacts.py | 24 | 1228 | """
==================
Find EOG artifacts
==================
Locate peaks of EOG to spot blinks and general EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 998
eog_events = mne.preprocessing.find_eog_events(raw, event_id)
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
exclude='bads')
tmin, tmax = -0.2, 0.2
epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected EOG artifacts : %d" % len(data))
###############################################################################
# Plot EOG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('EOG (muV)')
plt.show()
| bsd-3-clause |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e09_commute/src/commute.py | 1 | 2121 | #!/usr/bin/env python3
import os
import pandas as pd
import matplotlib.pyplot as plt
def cyclists(f):
df = pd.read_csv(f, sep = ";")
df = df.dropna(axis = 0, how = "all")
return df.dropna(axis = 1, how = "all")
def split_date(df):
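    # Split the Finnish 'Päivämäärä' timestamp column into Weekday, Day, Month,
    # Year and Hour columns, translating Finnish weekday abbreviations to
    # English and month names to month numbers.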
df = df.Päivämäärä.str.split(expand = True)
colnames = ["Weekday", "Day", "Month", "Year", "Hour"]
df.columns = colnames
old_week = ["ma", "ti", "ke", "to", "pe", "la", "su"]
week = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
for i in range(len(week)):
df.Weekday = df.Weekday.str.replace(old_week[i], week[i])
months = ["tammi", "helmi", "maalis", "huhti", "touko", "kesä", "heinä", "elo", "syys", "loka", "marras", "joulu"]
    for i in range(len(months)):
df.Month = df.Month.replace(months[i], i+1)
df.Month = pd.to_numeric(df.Month.map(int), downcast = "integer")
df.Hour = df.Hour.str.extract(r"([0-9]*)", expand = False)
df.Hour = df.Hour.map(int)
df.Day = df.Day.astype("int")
df.Year = df.Year.map(int)
df.Weekday = df.Weekday.astype("object")
df = df.astype({"Weekday": object, "Day": int, "Month": int, "Year": int, "Hour": int})
return df
def split_date_continues(f):
df = cyclists(f)
df2 = split_date(df)
df = df.drop("Päivämäärä", axis = 1)
return pd.concat([df2,df], axis = 1)
def bicycle_timeseries():
f = os.path.dirname(os.path.realpath(__file__)) + "/Helsingin_pyorailijamaarat.csv"
df = split_date_continues(f)
df["Date"] = pd.to_datetime(df[["Year", "Month", "Day", "Hour"]])
df = df.drop(columns = ["Year", "Month", "Day", "Hour"])
df = df.set_index("Date")
return df
def commute():
df = bicycle_timeseries()
aug_17 = df["2017-08-01":"2017-08-31"].copy()
old_days = "mon tue wed thu fri sat sun".title().split()
for i in range(1, 8):
aug_17.loc[aug_17.loc[:,"Weekday"] == old_days[i-1], "Weekday"] = i
weekdays = aug_17.groupby("Weekday")
return weekdays.sum()
def main():
df = commute()
plt.plot(df)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
owenst/geotweets | real_time_vis.py | 1 | 11219 | #!/usr/bin/python
# real_time_vis.py
# Saito 2015
"""This grabs tweets and visualizes them in real time using params.txt.
You can get the tweets using the streaming API or the REST API. The
rest API requires 5 second pauses between successive calls to the
twitter server. This is the default. Use the --stream or -s flag to
enable the streaming API. The Streaming API gets all tweets that are
geotagged within the bounding box. The geolocation is approximately
converted, by inscribing a bounding box square in the circle around
the geocoordinates. The tweets are also saved in JSON form to
a file called 'tweets.json'.
USAGE:
$ python real_time_vis.py [-h][-d][-f FILENAME][-n NUMBER][-s][-a ADDRESS]
OR for help, try:
$ ./real_time_vis.py -h
OR:
$ python real_time_vis.py
Example using default parameter file 'params.txt', with 20 top words
to display, on a growing chart:
$ ./real_time_vis --number 20
Or using the streaming API with an address:
$ ./real_time_vis -n 20 -s -a "175 5th Avenue NYC"
TO EXIT:
To exit one of these multithreaded programs, use a keyboard interrupt
like CTRL+C.
"""
from __future__ import division
import Queue
import argparse
import sys
import matplotlib.pyplot as plt
import geo_converter
import geosearchclass
import streamer
import utils
global stream # so that CTRL + C kills stream
def update_fdist(fdist, new_words):
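    """Increment the counts in fdist for each word in new_words."""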
for word in new_words:
if word in fdist:
fdist[word] += 1
else:
fdist[word] = 1
return fdist
def remove_infrequent_words(samples, fdist):
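    """Return only the samples that occur more than twice in fdist."""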
trimmed_samples = []
for item in samples:
if fdist[item] > 2:
trimmed_samples.append(item)
return trimmed_samples
def updating_plot(geosearchclass, number_of_words, grow=True):
search_results = geosearchclass.search()
filtered_words = utils.tokenize_and_filter(search_results)
fdist = utils.get_freq_dist(filtered_words)
# set up plot
samples = [item for item, _ in fdist.most_common(number_of_words)]
freqs = [fdist[sample] for sample in samples]
plt.grid(True, color="silver")
plt.plot(freqs, range(len(freqs)))
plt.yticks(range(len(samples)), [s for s in samples])
plt.ylabel("Samples")
plt.xlabel("Counts")
plt.title("Top Words Frequency Distribution")
plt.ion()
plt.show()
# set up loop
old_ids = set([s.id for s in search_results])
for i in xrange(100):
plt.pause(5)
# use mixed above, change to recent here
geosearchclass.result_type = "recent"
# perturbation study
# if i%2: # for testing purposes
# # #change location every odd time to nyc
# # geosearchclass.latitude =40.734073
# # geosearchclass.longitude =-73.990663
# # perturb latitude
# geosearchclass.latitude =geosearchclass.latitude + .001
# else:
# #now back to sf
# # geosearchclass.latitude = 37.7821
# # geosearchclass.longitude = -122.4093
# geosearchclass.longitude =geosearchclass.longitude + .001
search_results = geosearchclass.search()
new_search_results = utils.new_tweets(search_results, old_ids)
if new_search_results:
filtered_words = utils.tokenize_and_filter(new_search_results)
fdist = update_fdist(fdist, filtered_words)
if grow:
newsamples = [item
for item, _ in fdist.most_common(number_of_words)
]
s1 = set(newsamples)
s2 = set(samples)
s1.difference_update(s2)
if s1:
print "New words: " + str(list(s1))
newsamples = list(s1)
samples.extend(newsamples)
plt.yticks(range(len(samples)), [s for s in samples])
freqs = [fdist[sample] for sample in samples]
plt.plot(freqs, range(len(freqs)))
if grow:
plt.draw()
print '%d new tweet(s)' % len(new_search_results)
old_ids.update(set([s.id for s in new_search_results]))
else:
print "no updates"
# g = geosearchclass.GeoSearchClass()
# g.set_params_from_file('params.txt')
# search_results = g.search()
def updating_stream_plot(q, number_of_words=30):
"""This plot uses the streaming API to get real time twitter
information from a given region, determined by a geo-coordinate
bounding box. The upper left and lower right determine the
bounding box.
q is a queue instance, which holds tweets
number_of_words determines the average number of words in the
plot. Once the plot reaches 2 x number_of_words, it is shrunk down
to the new set of words and starts growing again
To exit the program early, hit CTRL + Z to stop the python script
and then CTRL + D twice to kill the terminal process and close the
window.
"""
setup = False
fdist = None
samples = None
draw_time = 0.1
samples = []
plt.ion()
plt.grid(True, color="silver")
for i in range(100000):
status = q.get()
search_results = [status]
while not q.empty():
print "getting another tweet"
status = q.get()
search_results.append(status)
if not setup:
print "Gathering enough data to begin plotting"
while len(samples) < 1:
status = q.get()
search_results.append(status)
filtered_words = utils.tokenize_and_filter(search_results)
if fdist is None:
fdist = utils.get_freq_dist(filtered_words)
else:
fdist = update_fdist(fdist, filtered_words)
n_words = min(10, len(fdist))
samples = [item for item, _ in fdist.most_common(n_words)]
# print "len(samples) = {}".format(len(samples))
samples = remove_infrequent_words(samples, fdist)
freqs = [fdist[sample] for sample in samples]
plt.plot(freqs, range(len(freqs)))
plt.yticks(range(len(samples)), [s for s in samples])
plt.ylabel("Samples")
plt.xlabel("Counts")
plt.title("Top Words Frequency Distribution")
plt.show()
plt.pause(draw_time)
setup = True
else:
filtered_words = utils.tokenize_and_filter(search_results)
fdist = update_fdist(fdist, filtered_words)
newsamples = [item
for item, _ in fdist.most_common(number_of_words)]
newsamples = remove_infrequent_words(newsamples, fdist)
s1 = set(newsamples)
s2 = set(samples)
s1.difference_update(s2)
if s1:
print "New words: " + str(list(s1))
newsamples = list(s1)
samples.extend(newsamples)
if len(samples) > 2*number_of_words:
samples = newsamples
plt.close()
plt.yticks(range(len(samples)), [s for s in samples])
freqs = [fdist[sample] for sample in samples]
plt.plot(freqs, range(len(freqs)))
plt.draw()
plt.pause(draw_time)
kill_plot()
return
def kill_plot():
print "turning interactive off"
plt.ioff()
print "closing plot"
plt.close()
return
def get_parser():
""" Creates a command line parser
--doc -d
--help -h
--filename -f
    --address -a
    --number -n
    --stream -s
"""
# Create command line argument parser
parser = argparse.ArgumentParser(
description='Create an updating word frequency distribution chart.')
parser.add_argument('-d',
'--doc',
action='store_true',
help='print module documentation and exit')
parser.add_argument(
'-f',
'--filename',
help='''specify a FILENAME to use as the parameter file.
If not specified, will use 'params.txt'.''')
parser.add_argument(
'-a',
'--address',
help='''give an ADDRESS to get geocoordinates for.
Put the address in quotes''')
# parser.add_argument('-r',
# '--rest',
# action='store_true',
# help='Use the REST API to create a growing chart\
# as new words arrive.')
parser.add_argument('-n',
'--number',
help='specify NUMBER of words to display. The\
streaming plot will grow to twice this number\
before shrinking again')
parser.add_argument('-s',
'--stream',
action='store_true',
help='Use streaming API to update a growing plot. \
Otherwise, results will be batched.\
Use Interrupt signal, like CTRL + C to exit. \
This uses the LOCATION and SEARCH_TERM from\
parameter file. The tweets are saved to tweets.json.')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
# print args
# print args.help
if args.doc:
print __doc__
import sys
sys.exit(0)
if args.number:
number = int(args.number)
else:
number = 30
g = geosearchclass.GeoSearchClass()
if args.filename:
print 'Using parameters from ' + str(args.filename)
g.set_params_from_file(args.filename)
else:
print "Using search values from params.txt"
g.set_params_from_file('params.txt')
if args.address:
print "Finding geocoordates for address:\n{}".format(args.address)
coords = geo_converter.get_geocoords_from_address(args.address)
if coords:
g.latitude = coords[0]
print "Found this latitude:"
print g.latitude
g.longitude = coords[1]
print "Found this longitude:"
print g.longitude
else:
print "Failed to find coordinates. Exiting."
sys.exit()
if args.stream:
print "using streaming queue"
q = Queue.Queue()
bounding_box = geo_converter.get_bounding_box_from(g)
search_terms = geo_converter.get_search_terms_from(g)
print "bounding_box = {}".format(bounding_box)
print "search_terms = {}".format(search_terms)
global stream
fn = 'tweets.json'
stream = streamer.start_stream(q, bounding_box, fn, search_terms)
updating_stream_plot(q, number)
else:
print "using REST API updating plot"
updating_plot(g, number, True) # set grow flag to True
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print "Main function interrupted"
if "stream" in globals():
streamer.kill_stream(stream)
kill_plot()
sys.exit()
| gpl-2.0 |
josenavas/qiime | scripts/make_distance_comparison_plots.py | 15 | 19365 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
from os.path import join
from string import strip
from skbio.util import create_dir
from skbio.draw import grouped_distributions
from qiime.colors import data_colors, data_color_order
from qiime.group import get_field_state_comparisons
from qiime.colors import matplotlib_rgb_color
from qiime.parse import (group_by_field, parse_distmat, parse_mapping_file,
QiimeParseError)
from qiime.stats import all_pairs_t_test, tail_types
from qiime.util import (get_options_lookup, make_option,
parse_command_line_parameters)
script_info = {}
script_info[
'brief_description'] = "Creates plots comparing distances between sample groupings"
script_info['script_description'] = """
This script creates plots (bar charts, scatter plots, or box plots) that
allow for the comparison between samples grouped at different field states
of a mapping file field.
This script can work with any field in the mapping file, and it can compare
any number of field states to all other field states within that field.
This script may be especially useful for fields that represent a time series,
because a plot can be generated showing the distances between samples at
certain timepoints against all other timepoints.
For example, a time field might contain the values 1, 2, 3, 4, and 5, which
label samples that are from day 1, day 2, day 3, and so on. This time field
can be specified when the script is run, as well as the timepoint(s) to
compare to every other timepoint. For example, two comparison groups
might be timepoints 1 and 2. The resulting plot would contain timepoints for
days 3, 4, and 5 along the x-axis, and at each of those timepoints, the
distances between day 1 and that timepoint would be plotted, as well as the
distances between day 2 and the timepoint.
The script also performs two-sample t-tests for all pairs of distributions to
help determine which distributions are significantly different from each other.
Tip: the script tries its best to fit everything into the plot, but there are
cases where plot elements may get cut off (e.g. if axis labels are extremely
long), or things may appear squashed, cluttered, or too small (e.g. if
there are many boxplots in one plot). Increasing the width and/or height of the
plot (using --width and --height) usually fixes these problems.
For more information and examples pertaining to this script, please refer to
the accompanying tutorial, which can be found at
http://qiime.org/tutorials/creating_distance_comparison_plots.html.
"""
script_info['script_usage'] = []
script_info['script_usage'].append((
"Compare distances between Native and Input samples for each timepoint in the "
"Time field",
"This example will generate a PDF containing a bar chart with the distances "
"between Native samples and every other timepoint, as well as the distances "
"between Input samples and every other timepoint. The output image will be "
"put in the 'out1' directory. For more details about this example input data, "
"please refer to the accompanying tutorial.",
"%prog -d forearm_only_unweighted_unifrac_dm.txt -m "
"costello_timeseries_map.txt -f TIME_SINCE_TRANSPLANT -c \"Native,Input\" -o "
"out1"))
script_info['output_description'] = """
An image of the plot is written to the specified output directory. The raw data
used in the plots and the results of significance tests can optionally be
written into tab-delimited files that are most easily viewed in a spreadsheet
program such as Microsoft Excel.
"""
options = get_options_lookup()
script_info['required_options'] = [
options['mapping_fp'],
options['output_dir'],
make_option('-d', '--distance_matrix_fp',
help='input distance matrix filepath (i.e. the result of '
'beta_diversity.py). WARNING: Only symmetric, hollow distance '
'matrices may be used as input. Asymmetric distance matrices, such as '
'those obtained by the UniFrac Gain metric (i.e. beta_diversity.py '
'-m unifrac_g), should not be used as input',
type='existing_filepath'),
make_option('-f', '--field', type='string',
help='field in the mapping file to make comparisons on'),
make_option('-c', '--comparison_groups', type='string',
help='comma-separated list of field states to compare to every other '
'field state, where the list of field states should be in quotes '
'(e.g. "FieldState1,FieldState2,FieldState3")')]
script_info['optional_options'] = [
make_option('-t', '--plot_type',
help='type of plot to produce ("bar" is bar chart, "scatter" is '
'scatter plot, and "box" is box plot) [default: %default]',
default='bar', type='choice', choices=['bar', 'scatter', 'box']),
make_option('-g', '--imagetype',
help='type of image to produce (i.e. png, svg, pdf) '
'[default: %default]', default='pdf', type="choice",
choices=['pdf', 'png', 'svg']),
make_option('--save_raw_data', action='store_true',
help='store raw data used to create plot in a tab-delimited file '
'[default: %default]',
default=False),
make_option('--suppress_significance_tests', action='store_true',
help='suppress performing signifance tests between each pair of '
'distributions [default: %default]', default=False),
make_option('-n', '--num_permutations', type='int',
help='the number of Monte Carlo permutations to perform when '
'calculating the nonparametric p-value in the significance tests. '
'Must be an integer greater than or equal to zero. If zero, the '
'nonparametric p-value will not be calculated and will instead be '
'reported as "N/A". This option has no effect if '
'--suppress_significance_tests is supplied [default: %default]',
default=0),
make_option('--tail_type', type='choice',
choices=tail_types, help='the type of tail test to compute when '
'calculating the p-values in the significance tests. "high" specifies '
'a one-tailed test for values greater than the observed t statistic, '
'while "low" specifies a one-tailed test for values less than the '
'observed t statistic. "two-sided" specifies a two-tailed test for '
'values greater in magnitude than the observed t statistic. This '
'option has no effect if --suppress_significance_tests is supplied. '
'Valid choices: ' +
' or '.join(tail_types) + ' [default: %default]',
default='two-sided'),
make_option('--width',
help='width of the output image in inches [default: %default]',
default=12, type='float'),
make_option('--height',
help='height of the output image in inches [default: %default]',
default=6, type='float'),
make_option('--x_tick_labels_orientation',
help='type of orientation for x-axis tick labels [default: %default]',
default='vertical', type='choice', choices=['vertical', 'horizontal']),
make_option('-a', '--label_type',
help='Label type ("numeric" or "categorical"). '
'If the label type is defined as numeric, the x-axis will be '
'scaled accordingly. Otherwise the x-values will treated '
'categorically and will be evenly spaced [default: %default].',
default='categorical',
type='choice', choices=['categorical', 'numeric']),
make_option('--y_min',
help='the minimum y-axis value in the resulting plot. If "auto", '
'it is automatically calculated [default: %default]',
default=0, type='string'),
make_option('--y_max',
help='the maximum y-axis value in the resulting plot. If "auto", '
'it is automatically calculated [default: %default]',
default=1, type='string'),
make_option('--transparent', action='store_true',
help='make output images transparent (useful for overlaying an image '
'on top of a colored background ) [default: %default]',
default=False),
make_option('--whisker_length',
help='if --plot_type is "box", determines the length of the whiskers '
'as a function of the IQR. For example, if 1.5, the whiskers '
'extend to 1.5 * IQR. Anything outside of that range is seen as '
'an outlier. If --plot_type is not "box", this option is ignored '
'[default: %default]',
default='1.5', type='float'),
make_option('--error_bar_type',
help='if --plot_type is "bar", determines the type of error bars to '
'use. "stdv" is standard deviation and "sem" is the standard '
'error of the mean. If --plot_type is not "bar", this option is '
'ignored [default: %default]',
default='stdv', type='choice', choices=['stdv', 'sem']),
make_option('--distribution_width',
help='width (in plot units) of each individual distribution (e.g. each '
'bar if the plot type is a bar chart, or the width of each box '
'if the plot type is a boxplot) [default: auto]',
default=None, type='float')]
script_info[
'option_label'] = {'mapping_fp': 'QIIME-formatted mapping filepath',
'output_dir': 'output directory',
'distance_matrix_fp':
'distance matrix filepath',
'field': 'field in mapping file',
'comparison_groups': 'field states to compare',
'imagetype': 'output image format',
'save_raw_data': 'save raw data used in plot',
'plot_type': 'output plot type',
'width': 'image width',
'height': 'image height',
'x_tick_labels_orientation':
'x-axis tick label '
'orientation',
'label_type': 'x-axis label type',
'y_min': 'y-axis min',
'y_max': 'y-axis max',
'transparent': 'make images transparent',
'whisker_length': 'whisker length as function '
'of IQR',
'error_bar_type': 'type of error bars to use ',
'distribution_width': 'width of each '
'distribution'}
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
# Create the output dir if it doesn't already exist.
try:
create_dir(opts.output_dir)
except:
option_parser.error("Could not create or access output directory "
"specified with the -o option.")
# Parse the distance matrix and mapping file.
try:
dist_matrix_header, dist_matrix = parse_distmat(
open(opts.distance_matrix_fp, 'U'))
except:
option_parser.error("This does not look like a valid distance matrix "
"file. Please supply a valid distance matrix file using the -d "
"option.")
try:
mapping, mapping_header, mapping_comments = parse_mapping_file(
open(opts.mapping_fp, 'U'))
except QiimeParseError:
option_parser.error("This does not look like a valid metadata mapping "
"file. Please supply a valid mapping file using the -m option.")
# Make sure the y_min and y_max options make sense, as they can be either
# 'auto' or a number.
y_min = opts.y_min
y_max = opts.y_max
try:
y_min = float(y_min)
except ValueError:
if y_min == 'auto':
y_min = None
else:
option_parser.error("The --y_min option must be either a number "
"or 'auto'.")
try:
y_max = float(y_max)
except ValueError:
if y_max == 'auto':
y_max = None
else:
option_parser.error("The --y_max option must be either a number "
"or 'auto'.")
# Parse the field states that will be compared to every other field state.
comparison_field_states = opts.comparison_groups
comparison_field_states = map(strip, comparison_field_states.split(','))
comparison_field_states = [field_state.strip('"').strip("'")
for field_state in comparison_field_states]
if comparison_field_states is None:
option_parser.error("You must provide at least one field state to "
"compare (using the -c option).")
# Get distance comparisons between each field state and each of the
# comparison field states.
field = opts.field
comparison_groupings = get_field_state_comparisons(dist_matrix_header,
dist_matrix, mapping_header, mapping, field,
comparison_field_states)
# Grab a list of all field states that had the comparison field states
# compared against them. These will be plotted along the x-axis.
field_states = comparison_groupings.keys()
    def custom_comparator(x, y):
        # Compare numerically when both values parse as floats; otherwise
        # fall back to lexical ordering.
        try:
            num_x = float(x)
            num_y = float(y)
            return cmp(num_x, num_y)
        except ValueError:
            if x < y:
                return -1
            elif x > y:
                return 1
            else:
                return 0
# Sort the field states as numbers if the elements are numbers, else sort
# them lexically.
field_states.sort(custom_comparator)
# If the label type is numeric, get a list of all field states in sorted
# numeric order. These will be used to determine the spacing of the
# field state 'points' along the x-axis.
x_spacing = None
if opts.label_type == "numeric":
try:
x_spacing = sorted(map(float, field_states))
except:
option_parser.error("The 'numeric' label type is invalid because "
"not all field states could be converted into "
"numbers. Please specify a different label "
"type.")
# Accumulate the data for each field state 'point' along the x-axis.
plot_data = []
plot_x_axis_labels = []
for field_state in field_states:
field_state_data = []
for comp_field_state in comparison_field_states:
field_state_data.append(
comparison_groupings[field_state][comp_field_state])
plot_data.append(field_state_data)
plot_x_axis_labels.append(field_state)
# Plot the data and labels.
plot_title = "Distance Comparisons"
plot_x_label = field
plot_y_label = "Distance"
# If we are creating a bar chart or box plot, grab a list of good data
# colors to use.
plot_type = opts.plot_type
plot_colors = None
if plot_type == "bar" or plot_type == "box":
plot_colors = [matplotlib_rgb_color(data_colors[color].toRGB())
for color in data_color_order]
assert plot_data, "Error: there is no data to plot!"
width = opts.width
height = opts.height
if width <= 0 or height <= 0:
option_parser.error("The specified width and height of the image must "
"be greater than zero.")
plot_figure = grouped_distributions(
opts.plot_type, plot_data, x_values=x_spacing,
data_point_labels=plot_x_axis_labels,
distribution_labels=comparison_field_states,
distribution_markers=plot_colors, x_label=plot_x_label,
y_label=plot_y_label, title=plot_title,
x_tick_labels_orientation=opts.x_tick_labels_orientation, y_min=y_min,
y_max=y_max, whisker_length=opts.whisker_length,
error_bar_type=opts.error_bar_type,
distribution_width=opts.distribution_width, figure_width=width,
figure_height=height)
# Save the plot in the specified format.
output_plot_fp = join(opts.output_dir, "%s_Distance_Comparisons.%s" %
(field, opts.imagetype))
plot_figure.savefig(output_plot_fp, format=opts.imagetype,
transparent=opts.transparent)
if not opts.suppress_significance_tests:
sig_tests_f = open(join(opts.output_dir, "%s_Stats.txt" % field), 'w')
# Rearrange the plot data into a format suitable for all_pairs_t_test.
sig_tests_labels = []
sig_tests_data = []
for data_point, data_point_label in zip(plot_data, plot_x_axis_labels):
for dist, comp_field in zip(data_point, comparison_field_states):
sig_tests_labels.append('%s vs %s' % (data_point_label,
comp_field))
sig_tests_data.append(dist)
sig_tests_results = all_pairs_t_test(sig_tests_labels, sig_tests_data,
tail_type=opts.tail_type,
num_permutations=opts.num_permutations)
sig_tests_f.write(sig_tests_results)
sig_tests_f.close()
if opts.save_raw_data:
# Write the raw plot data into a tab-delimited file, where each line
# has the distances between a comparison group and another field state
# 'point' along the x-axis.
assert (len(plot_x_axis_labels) == len(plot_data)), "The number of " +\
"labels do not match the number of points along the x-axis."
raw_data_fp = join(opts.output_dir,
"%s_Distance_Comparisons.txt" % field)
raw_data_f = open(raw_data_fp, 'w')
raw_data_f.write("#ComparisonGroup\tFieldState\tDistances\n")
for label, data in zip(plot_x_axis_labels, plot_data):
assert (len(comparison_field_states) == len(data)), "The " +\
"number of specified comparison groups does not match " +\
"the number of groups found at the current point along " +\
"the x-axis."
for comp_field_state, comp_grp_data in zip(comparison_field_states, data):
raw_data_f.write(comp_field_state + "\t" + label + "\t" +
"\t".join(map(str, comp_grp_data)) + "\n")
raw_data_f.close()
if __name__ == "__main__":
main()
| gpl-2.0 |
anaviltripathi/pgmpy | pgmpy/tests/test_models/test_NaiveBayes.py | 4 | 8573 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
from pgmpy.models import NaiveBayes
from pgmpy.independencies import Independencies
from pgmpy.extern import six
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = NaiveBayes()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = NaiveBayes([('a', 'b'), ('a', 'c')])
six.assertCountEqual(self, self.g.nodes(), ['a', 'b', 'c'])
six.assertCountEqual(self, self.g.edges(), [('a', 'b'), ('a', 'c')])
self.assertEqual(self.g.parent_node, 'a')
self.assertSetEqual(self.g.children_nodes, {'b', 'c'})
self.assertRaises(ValueError, NaiveBayes, [('a', 'b'), ('b', 'c')])
self.assertRaises(ValueError, NaiveBayes, [('a', 'b'), ('c', 'b')])
self.assertRaises(ValueError, NaiveBayes, [('a', 'b'), ('d', 'e')])
def test_class_init_with_data_nonstring(self):
self.g = NaiveBayes([(1, 2), (1, 3)])
six.assertCountEqual(self, self.g.nodes(), [1, 2, 3])
six.assertCountEqual(self, self.g.edges(), [(1, 2), (1, 3)])
self.assertEqual(self.g.parent_node, 1)
self.assertSetEqual(self.g.children_nodes, {2, 3})
self.assertRaises(ValueError, NaiveBayes, [(1, 2), (2, 3)])
self.assertRaises(ValueError, NaiveBayes, [(1, 2), (3, 2)])
self.assertRaises(ValueError, NaiveBayes, [(1, 2), (3, 4)])
def test_add_node_string(self):
self.G.add_node('a')
self.assertListEqual(self.G.nodes(), ['a'])
def test_add_node_nonstring(self):
self.G.add_node(1)
self.assertListEqual(self.G.nodes(), [1])
def test_add_nodes_from_string(self):
self.G.add_nodes_from(['a', 'b', 'c', 'd'])
six.assertCountEqual(self, self.G.nodes(), ['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
six.assertCountEqual(self, self.G.nodes(), [1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge('a', 'b')
six.assertCountEqual(self, self.G.nodes(), ['a', 'b'])
self.assertListEqual(self.G.edges(), [('a', 'b')])
self.assertEqual(self.G.parent_node, 'a')
self.assertSetEqual(self.G.children_nodes, {'b'})
self.G.add_nodes_from(['c', 'd'])
self.G.add_edge('a', 'c')
self.G.add_edge('a', 'd')
six.assertCountEqual(self, self.G.nodes(), ['a', 'b', 'c', 'd'])
six.assertCountEqual(self, self.G.edges(), [('a', 'b'), ('a', 'c'), ('a', 'd')])
self.assertEqual(self.G.parent_node, 'a')
self.assertSetEqual(self.G.children_nodes, {'b', 'c', 'd'})
self.assertRaises(ValueError, self.G.add_edge, 'b', 'c')
self.assertRaises(ValueError, self.G.add_edge, 'd', 'f')
self.assertRaises(ValueError, self.G.add_edge, 'e', 'f')
self.assertRaises(ValueError, self.G.add_edges_from, [('a', 'e'), ('b', 'f')])
self.assertRaises(ValueError, self.G.add_edges_from, [('b', 'f')])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
six.assertCountEqual(self, self.G.nodes(), [1, 2])
self.assertListEqual(self.G.edges(), [(1, 2)])
self.assertEqual(self.G.parent_node, 1)
self.assertSetEqual(self.G.children_nodes, {2})
self.G.add_nodes_from([3, 4])
self.G.add_edge(1, 3)
self.G.add_edge(1, 4)
six.assertCountEqual(self, self.G.nodes(), [1, 2, 3, 4])
six.assertCountEqual(self, self.G.edges(), [(1, 2), (1, 3), (1, 4)])
self.assertEqual(self.G.parent_node, 1)
self.assertSetEqual(self.G.children_nodes, {2, 3, 4})
self.assertRaises(ValueError, self.G.add_edge, 2, 3)
self.assertRaises(ValueError, self.G.add_edge, 3, 6)
self.assertRaises(ValueError, self.G.add_edge, 5, 6)
self.assertRaises(ValueError, self.G.add_edges_from, [(1, 5), (2, 6)])
self.assertRaises(ValueError, self.G.add_edges_from, [(2, 6)])
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, 'a', 'a')
self.assertRaises(ValueError, self.G.add_edge, 1, 1)
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'a')])
def test_update_node_parents_bm_constructor(self):
self.g = NaiveBayes([('a', 'b'), ('a', 'c')])
self.assertListEqual(self.g.predecessors('a'), [])
self.assertListEqual(self.g.predecessors('b'), ['a'])
self.assertListEqual(self.g.predecessors('c'), ['a'])
def test_update_node_parents(self):
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edges_from([('a', 'b'), ('a', 'c')])
self.assertListEqual(self.G.predecessors('a'), [])
self.assertListEqual(self.G.predecessors('b'), ['a'])
self.assertListEqual(self.G.predecessors('c'), ['a'])
def tearDown(self):
del self.G
class TestNaiveBayesMethods(unittest.TestCase):
def setUp(self):
self.G1 = NaiveBayes([('a', 'b'), ('a', 'c'),
('a', 'd'), ('a', 'e')])
self.G2 = NaiveBayes([('d', 'g'), ('d', 'l'),
('d', 's')])
def test_local_independencies(self):
self.assertListEqual(self.G1.local_independencies('a'), [None])
self.assertListEqual(self.G1.local_independencies('b'),
[Independencies(['b', ['e', 'c', 'd'], 'a'])])
self.assertListEqual(self.G1.local_independencies('c'),
[Independencies(['c', ['e', 'b', 'd'], 'a'])])
self.assertListEqual(self.G1.local_independencies('d'),
[Independencies(['d', ['b', 'c', 'e'], 'a'])])
def test_active_trail_nodes(self):
self.assertListEqual(sorted(self.G2.active_trail_nodes('d')), ['d', 'g', 'l', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('g')), ['d', 'g', 'l', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('l')), ['d', 'g', 'l', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('s')), ['d', 'g', 'l', 's'])
def test_active_trail_nodes_args(self):
self.assertListEqual(sorted(self.G2.active_trail_nodes('d', observed='g')), ['d', 'l', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('l', observed='g')), ['d', 'l', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('s', observed=['g', 'l'])), ['d', 's'])
self.assertListEqual(sorted(self.G2.active_trail_nodes('s', observed=['d', 'l'])), ['s'])
def tearDown(self):
del self.G1
del self.G2
class TestNaiveBayesFit(unittest.TestCase):
def setUp(self):
self.model1 = NaiveBayes()
self.model2 = NaiveBayes([('A', 'B')])
def test_fit_model_creation(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
self.model1.fit(values, 'A')
six.assertCountEqual(self, self.model1.nodes(), ['A', 'B', 'C', 'D', 'E'])
six.assertCountEqual(self, self.model1.edges(), [('A', 'B'), ('A', 'C'), ('A', 'D'),
('A', 'E')])
self.assertEqual(self.model1.parent_node, 'A')
self.assertSetEqual(self.model1.children_nodes, {'B', 'C', 'D', 'E'})
self.model2.fit(values)
six.assertCountEqual(self, self.model1.nodes(), ['A', 'B', 'C', 'D', 'E'])
six.assertCountEqual(self, self.model1.edges(), [('A', 'B'), ('A', 'C'), ('A', 'D'),
('A', 'E')])
self.assertEqual(self.model2.parent_node, 'A')
self.assertSetEqual(self.model2.children_nodes, {'B', 'C', 'D', 'E'})
def test_fit_model_creation_exception(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
values2 = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 3)),
columns=['C', 'D', 'E'])
self.assertRaises(ValueError, self.model1.fit, values)
self.assertRaises(ValueError, self.model1.fit, values2)
self.assertRaises(ValueError, self.model2.fit, values2, 'A')
def tearDown(self):
del self.model1
del self.model2
| mit |
smooge/fed-infra-scripts | mirror-data/moving_average.py | 1 | 2963 | # Moving Average
import pandas
import matplotlib.pyplot as plt
import math
tree = {}
df = pandas.read_csv("ALX.csv")
dates = df['1970-01-01']
epel4 = pandas.rolling_mean(df['epel4'],7)
epel5 = pandas.rolling_mean(df['epel5'],7)
epel6 = pandas.rolling_mean(df['epel6'],7)
epel7 = pandas.rolling_mean(df['epel7'],7)
fed03 = pandas.rolling_mean(df['fed03'],7)
fed04 = pandas.rolling_mean(df['fed04'],7)
fed05 = pandas.rolling_mean(df['fed05'],7)
fed06 = pandas.rolling_mean(df['fed06'],7)
fed07 = pandas.rolling_mean(df['fed07'],7)
fed08 = pandas.rolling_mean(df['fed08'],7)
fed09 = pandas.rolling_mean(df['fed09'],7)
fed10 = pandas.rolling_mean(df['fed10'],7)
fed11 = pandas.rolling_mean(df['fed11'],7)
fed12 = pandas.rolling_mean(df['fed12'],7)
fed13 = pandas.rolling_mean(df['fed13'],7)
fed14 = pandas.rolling_mean(df['fed14'],7)
fed15 = pandas.rolling_mean(df['fed15'],7)
fed16 = pandas.rolling_mean(df['fed16'],7)
fed17 = pandas.rolling_mean(df['fed17'],7)
fed18 = pandas.rolling_mean(df['fed18'],7)
fed19 = pandas.rolling_mean(df['fed19'],7)
fed20 = pandas.rolling_mean(df['fed20'],7)
fed21 = pandas.rolling_mean(df['fed21'],7)
fed22 = pandas.rolling_mean(df['fed22'],7)
fed23 = pandas.rolling_mean(df['fed23'],7)
rawhide = pandas.rolling_mean(df['rawhide'],7)
unk_rel = pandas.rolling_mean(df['unk_rel'],7)
EPEL = pandas.rolling_mean(df['EPEL'],7)
Fedora = pandas.rolling_mean(df['Fedora'],7)
ARM = pandas.rolling_mean(df['ARM'],7)
ARM64 = pandas.rolling_mean(df['ARM64'],7)
ia64 = pandas.rolling_mean(df['ia64'],7)
mips = pandas.rolling_mean(df['mips'],7)
ppc = pandas.rolling_mean(df['ppc'],7)
s390 = pandas.rolling_mean(df['s390'],7)
sparc = pandas.rolling_mean(df['sparc'],7)
tilegx = pandas.rolling_mean(df['tilegx'],7)
x86_32 = pandas.rolling_mean(df['x86_32'],7)
x86_64 = pandas.rolling_mean(df['x86_64'],7)
unk_arc = pandas.rolling_mean(df['unk_arc'],7)
for i in xrange(0,len(dates)):
if math.isnan(epel4[i]):
csv_line = ",".join([dates[i],"0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"])
else:
csv_line = ",".join([dates[i],str(int(epel4[i])),str(int(epel5[i])),str(int(epel6[i])),str(int(epel7[i])),str(int(fed03[i])),str(int(fed04[i])),str(int(fed05[i])),str(int(fed06[i])),str(int(fed07[i])),str(int(fed08[i])),str(int(fed09[i])),str(int(fed10[i])),str(int(fed11[i])),str(int(fed12[i])),str(int(fed13[i])),str(int(fed14[i])),str(int(fed15[i])),str(int(fed16[i])),str(int(fed17[i])),str(int(fed18[i])),str(int(fed19[i])),str(int(fed20[i])),str(int(fed21[i])),str(int(fed22[i])),str(int(fed23[i])),str(int(rawhide[i])),str(int(unk_rel[i])),str(int(EPEL[i])),str(int(Fedora[i])),str(int(ARM[i])),str(int(ARM64[i])),str(int(ia64[i])),str(int(mips[i])),str(int(ppc[i])),str(int(s390[i])),str(int(sparc[i])),str(int(tilegx[i])),str(int(x86_32[i])),str(int(x86_64[i])),str(int(unk_arc[i]))])
print csv_line
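# Note: pandas.rolling_mean() has since been removed from pandas; with a
# modern pandas the same 7-day smoothing over every column could be written
# roughly as (output filename is only a placeholder):
#   smoothed = df.set_index('1970-01-01').rolling(7).mean()
#   smoothed.dropna().astype(int).to_csv('ALX_7day_mean.csv')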
| gpl-2.0 |
hfutsuchao/Python2.6 | stocks/strategey_classes/quant_stock_df.py | 1 | 11167 | #coding:utf-8
import pandas as pd
import numpy as np
import talib
import cral_CNstock_order_ana
class GetStockDf(object):
def __init__(self,code,start_date='0',end_date='9'):
self.__code = code
self.__start_date = start_date
self.__end_date = end_date
def get_quota(self):
#stock_amount = cral_CNstock_order_ana.main()
close = self.__df['close']
high_prices = self.__df['high'].values
low_prices = self.__df['low'].values
close_prices = close.values
ma5 = talib.MA(close_prices,5)
ma10 = talib.MA(close_prices,10)
ma20 = talib.MA(close_prices,20)
ma30 = talib.MA(close_prices,30)
K, D = talib.STOCH(high_prices,low_prices,close_prices, fastk_period=9, slowk_period=3)
J = K * 3 - D * 2
sar = talib.SAR(high_prices, low_prices, acceleration=0.05, maximum=0.2)
sar = pd.DataFrame(sar-close)
sar.index = self.__df.date
atr = talib.ATR(high_prices,low_prices,close_prices)
natr = talib.NATR(high_prices,low_prices,close_prices)
trange = talib.TRANGE(high_prices,low_prices,close_prices)
cci = talib.CCI(high_prices,low_prices,close_prices,14)
dif, dea, bar = talib.MACDFIX(close_prices)
bar = bar * 2
df_all = self.__df.drop(['code','open','low', 'high','volume'],axis=1).set_index('date')
df_all.insert(0,'ma5',ma5)
df_all.insert(0,'ma10',ma10)
df_all.insert(0,'ma20',ma20)
df_all.insert(0,'ma30',ma30)
df_all.insert(0,'K',K)
df_all.insert(0,'D',D)
df_all.insert(0,'J',J)
df_all.insert(0,'cci',cci)
df_all.insert(0,'bar',bar)
df_all.insert(0,'dif',dif)
df_all.insert(0,'dea',dea)
df_all.insert(0,'sar',sar)
#df_all = pd.concat([df_all,stock_amount],axis=1)
df_yesterday = df_all.T
index_c = df_all.index
added = [np.nan] * len(df_all.columns)
df_yesterday.insert(0, len(df_yesterday.columns), added)
df_yesterday = df_yesterday.T
df_yesterday = df_yesterday.drop(df_all.index[len(df_all.index)-1])
df_yesterday.insert(0, 'index_c', index_c)
df_yesterday = df_yesterday.set_index('index_c')
df_dif = df_all - df_yesterday
df_dif_close_plus_one_day = df_dif.copy()
for i in range(len(df_dif_close_plus_one_day['close'])-1):
df_dif_close_plus_one_day['close'][i] = df_dif_close_plus_one_day['close'][i+1]
df_dif_close_plus_one_day['close'][len(df_dif_close_plus_one_day['close'])-1] = np.nan
df_dif = df_dif.dropna(axis=0,how='any')
df_dif_close_plus_one_day = df_dif_close_plus_one_day.dropna(axis=0,how='any')
return df_dif, df_dif_close_plus_one_day
def get_normlized(self,df):
df_norm = df.copy()
if self.__norm_type == 'max':
for column in df_norm.columns:
df_norm[column] = df_norm[column] / abs(df_norm[column]).max()
elif self.__norm_type == 'character':
for column in df_norm.columns:
df_norm[column].ix[df_norm[column] <= 0] = -1
df_norm[column].ix[df_norm[column] > 0] = 1
else:
return None
return df_norm
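    # Illustration of the two normalisations on one column (made-up values):
    # with norm_type 'max', [2.0, -1.0, 0.5] becomes [1.0, -0.5, 0.25]
    # (divide by the largest absolute value); with 'character' the same
    # column becomes [1, -1, 1], i.e. only the sign of each change is kept.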
def get_trade_chance(self):
#df,close,norm_type,start_date,end_date,lost
rate = {}
rate['based'] = {}
rate['based']['profit'] = {}
buy_price = {}
buy_date = {}
sell_price = {}
sell_date = {}
is_buy = {}
is_sell = {}
        df_dif_norm = self.get_normlized(self.df)
        df_dif_norm_corr = df_dif_norm.corr().ix['close']
start_date_open = 0
end_date_open = 0
        for idx in range(len(df_dif_norm)):
            date_this = df_dif_norm.index[idx]
close_val = self.close[idx]
if date_this < self.__start_date:
continue
if date_this > self.__end_date:
end_date_open = close_val
break
sign = 0
for key_name in df_dif_norm.drop('close',axis=1).columns:
sign = sign + df_dif_norm.ix[date_this,key_name] * df_dif_norm_corr[key_name]
if start_date_open == 0:
start_date_open = close_val
x = idx
if idx>=1:
lastdate = df_dif_norm.index[idx-1]
if lastdate not in rate['based']['profit']:
rate['based']['profit'][lastdate] = 1.0
rate['based']['profit'][date_this] = rate['based']['profit'][lastdate] * self.close[idx] / self.close[idx-1]
for m in np.array(range(-100,200,5))/20.0:
for n in np.array(range(-100,int(50*m+1),5))/20.0:
s_type = 'corr' + str(m) + '_' + str(n)
if s_type not in buy_price:
buy_price[s_type] = []
buy_date[s_type] = []
sell_price[s_type] = []
sell_date[s_type] = []
is_buy[s_type] = 0
#is_sell[s_type] = 0
if sign>=m:
if is_buy[s_type] == 0:
is_buy[s_type] = 1
buy_price[s_type].append(close_val)
buy_date[s_type].append(date_this)
#is_sell[s_type] = 0
continue
if sign<n or (len(buy_price[s_type]) and close_val * (1-0.002) / buy_price[s_type][-1] <= (1-self.lost)):
if is_buy[s_type] == 1 : #and is_sell[s_type] == 0
is_buy[s_type] = 0
sell_price[s_type].append(close_val)
sell_date[s_type].append(date_this)
#is_sell[s_type] = 1
if not end_date_open:
end_date_open = close_val
if not start_date_open:
return []
rate['based']['profit']['total'] = end_date_open * (1 - 0.002) / start_date_open
return rate, date_this, buy_price, buy_date, sell_price, sell_date, start_date_open, end_date_open
def back_test(df,start_date='0',date_delta=60,norm_type='character',quota_index=0,lost=1.0):
        if start_date != '0':
end_date = date_add(start_date,date_delta)
else:
end_date = '9'
if end_date > today():
return []
df_dif = get_quota(df)[quota_index]
df_dif_norm = get_normlized(df_dif)
df = pd.concat([df.set_index('date')['close'],df_dif_norm['sar']],axis=1).dropna(how='any')
close = df['close']
r = get_trade_chance(df_dif,close,norm_type,start_date,end_date,lost)
if r:
rate, date_this, buy_price, buy_date, sell_price, sell_date, start_date_open, end_date_open = r
else:
return []
for s_type in sell_price:
rate[s_type] = {}
rate[s_type]['profit'] = {}
rate[s_type]['profit']['total'] = 1.0
rate[s_type]['trade'] = {}
for i in range(len(buy_price[s_type])):
try:
#rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i]) * ((sell_price[s_type][i]) * (1 - 0.002) / buy_price[s_type][i+1])
rate[s_type]['profit'][buy_date[s_type][i]] = rate[s_type]['profit']['total']
rate[s_type]['trade'][buy_date[s_type][i]] = [buy_date[s_type][i], buy_price[s_type][i], sell_date[s_type][i], sell_price[s_type][i]]
except Exception,e:
if len(buy_price[s_type]) == len(sell_price[s_type]):
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / sell_price[s_type][i])
else:
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit'][date_this] = rate[s_type]['profit']['total']
rate[s_type]['trade'][date_this] = [buy_date[s_type][i], buy_price[s_type][i], 'lastday', end_date_open]
return sorted(rate.items(),key=lambda x:x[1]['profit']['total'],reverse=True)
def plot_profit(rate,s_type=''):
for code in rate:
best_strategy_code = rate[code][0][0]
rate_dic = dict(rate[code])
based_profit = pd.DataFrame(rate_dic['based']).drop('total',axis=0)
if s_type:
best_strategy_profit = pd.DataFrame(rate_dic[s_type]).fillna(method='pad').drop('total',axis=0)
best_strategy_code = s_type
else:
if rate[code][0][0] == 'based':
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][1][0]]).fillna(method='pad').drop('total',axis=0)
else:
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][0][0]]).fillna(method='pad').drop('total',axis=0)
profit_all = pd.concat([based_profit['profit'], best_strategy_profit['profit']], axis=1).fillna(method='pad')
profit_all.plot()
plt.legend(('based_profit', 'best_strategy_profit'), loc='upper left')
plt.title(code + '_' + best_strategy_code)
plt.savefig('/Users/NealSu/Downloads/profit_pic/' + code + '_' + best_strategy_code + '.jpg')
plt.close('all')
try:
print code
print best_strategy_profit['trade']
except:
pass
def strategy_choose(rate):
strategy_sum = {}
best_strategy = {}
for code in rate:
rate_dic = dict(rate[code])
best_strategy_code = rate[code][0][0]
if best_strategy_code not in best_strategy:
best_strategy[best_strategy_code] = 1
else:
best_strategy[best_strategy_code] = best_strategy[best_strategy_code] + 1
for s_type in rate_dic:
if s_type not in strategy_sum:
strategy_sum[s_type] = rate_dic[s_type]['profit']['total']
else:
strategy_sum[s_type] = strategy_sum[s_type] + rate_dic[s_type]['profit']['total']
best_strategy = sorted(best_strategy.items(),key=lambda x:x[1],reverse=True)
strategy_sum = sorted(strategy_sum.items(),key=lambda x:x[1],reverse=True)
return (best_strategy,strategy_sum)
def single_test(df,start_dates,date_deltas,norm_type,quota_index):
rate = {}
for start_date in start_dates:
for date_delta in date_deltas:
r = back_test(df, start_date, date_delta, norm_type, quota_index)
if r:
rate[start_date+'_'+date_add(start_date,date_delta)] = r
return rate
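    # Hypothetical driver for the grid search above, treating these helpers
    # as plain functions (they take no self); `df` and the dates here are
    # placeholders:
    #   rates = single_test(df, ['2017-01-01'], [60], 'character', 0)
    #   best_strategy, strategy_sum = strategy_choose(rates)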
| gpl-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/formats/test_to_latex.py | 9 | 13691 | from datetime import datetime
import pytest
import pandas as pd
from pandas import DataFrame, compat, Series
from pandas.util import testing as tm
from pandas.compat import u
import codecs
@pytest.fixture
def frame():
return DataFrame(tm.getSeriesData())
class TestToLatex(object):
def test_to_latex_filename(self, frame):
with tm.ensure_clean('test.tex') as path:
frame.to_latex(path)
with open(path, 'r') as f:
assert frame.to_latex() == f.read()
# test with utf-8 and encoding option (GH 7061)
df = DataFrame([[u'au\xdfgangen']])
with tm.ensure_clean('test.tex') as path:
df.to_latex(path, encoding='utf-8')
with codecs.open(path, 'r', encoding='utf-8') as f:
assert df.to_latex() == f.read()
# test with utf-8 without encoding option
if compat.PY3: # python3: pandas default encoding is utf-8
with tm.ensure_clean('test.tex') as path:
df.to_latex(path)
with codecs.open(path, 'r', encoding='utf-8') as f:
assert df.to_latex() == f.read()
else:
# python2 default encoding is ascii, so an error should be raised
with tm.ensure_clean('test.tex') as path:
with pytest.raises(UnicodeEncodeError):
df.to_latex(path)
def test_to_latex(self, frame):
# it works!
frame.to_latex()
df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
withindex_result = df.to_latex()
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
withoutindex_result = df.to_latex(index=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withoutindex_result == withoutindex_expected
def test_to_latex_format(self, frame):
# GH Bug #9402
frame.to_latex(column_format='ccc')
df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
withindex_result = df.to_latex(column_format='ccc')
withindex_expected = r"""\begin{tabular}{ccc}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
def test_to_latex_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False],
'datetime64': [datetime(2016, 1, 1),
datetime(2016, 2, 5),
datetime(2016, 3, 3)]})
formatters = {'int': lambda x: '0x%x' % x,
'float': lambda x: '[% 4.1f]' % x,
'object': lambda x: '-%s-' % str(x),
'datetime64': lambda x: x.strftime('%Y-%m'),
'__index__': lambda x: 'index: %s' % x}
result = df.to_latex(formatters=dict(formatters))
expected = r"""\begin{tabular}{llrrl}
\toprule
{} & datetime64 & float & int & object \\
\midrule
index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
\bottomrule
\end{tabular}
"""
assert result == expected
def test_to_latex_multiindex(self):
df = DataFrame({('x', 'y'): ['a']})
result = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & x \\
{} & y \\
\midrule
0 & a \\
\bottomrule
\end{tabular}
"""
assert result == expected
result = df.T.to_latex()
expected = r"""\begin{tabular}{lll}
\toprule
& & 0 \\
\midrule
x & y & a \\
\bottomrule
\end{tabular}
"""
assert result == expected
df = DataFrame.from_dict({
('c1', 0): pd.Series(dict((x, x) for x in range(4))),
('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c2', 0): pd.Series(dict((x, x) for x in range(4))),
('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c3', 0): pd.Series(dict((x, x) for x in range(4))),
}).T
result = df.to_latex()
expected = r"""\begin{tabular}{llrrrr}
\toprule
& & 0 & 1 & 2 & 3 \\
\midrule
c1 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c2 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
assert result == expected
# GH 14184
df = df.T
df.columns.names = ['a', 'b']
result = df.to_latex()
expected = r"""\begin{tabular}{lrrrrr}
\toprule
a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
b & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 4 & 0 & 4 & 0 \\
1 & 1 & 5 & 1 & 5 & 1 \\
2 & 2 & 6 & 2 & 6 & 2 \\
3 & 3 & 7 & 3 & 7 & 3 \\
\bottomrule
\end{tabular}
"""
assert result == expected
# GH 10660
df = pd.DataFrame({'a': [0, 0, 1, 1],
'b': list('abab'),
'c': [1, 2, 3, 4]})
result = df.set_index(['a', 'b']).to_latex()
expected = r"""\begin{tabular}{llr}
\toprule
& & c \\
a & b & \\
\midrule
0 & a & 1 \\
& b & 2 \\
1 & a & 3 \\
& b & 4 \\
\bottomrule
\end{tabular}
"""
assert result == expected
result = df.groupby('a').describe().to_latex()
expected = r"""\begin{tabular}{lrrrrrrrr}
\toprule
{} & \multicolumn{8}{l}{c} \\
{} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
a & & & & & & & & \\
\midrule
0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
\bottomrule
\end{tabular}
"""
assert result == expected
def test_to_latex_multicolumnrow(self):
df = pd.DataFrame({
('c1', 0): dict((x, x) for x in range(5)),
('c1', 1): dict((x, x + 5) for x in range(5)),
('c2', 0): dict((x, x) for x in range(5)),
('c2', 1): dict((x, x + 5) for x in range(5)),
('c3', 0): dict((x, x) for x in range(5))
})
result = df.to_latex()
expected = r"""\begin{tabular}{lrrrrr}
\toprule
{} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
assert result == expected
result = df.to_latex(multicolumn=False)
expected = r"""\begin{tabular}{lrrrrr}
\toprule
{} & c1 & & c2 & & c3 \\
{} & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
1 & 1 & 6 & 1 & 6 & 1 \\
2 & 2 & 7 & 2 & 7 & 2 \\
3 & 3 & 8 & 3 & 8 & 3 \\
4 & 4 & 9 & 4 & 9 & 4 \\
\bottomrule
\end{tabular}
"""
assert result == expected
result = df.T.to_latex(multirow=True)
expected = r"""\begin{tabular}{llrrrrr}
\toprule
& & 0 & 1 & 2 & 3 & 4 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
assert result == expected
df.index = df.T.index
result = df.T.to_latex(multirow=True, multicolumn=True,
multicolumn_format='c')
expected = r"""\begin{tabular}{llrrrrr}
\toprule
& & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
& & 0 & 1 & 0 & 1 & 0 \\
\midrule
\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
& 1 & 5 & 6 & 7 & 8 & 9 \\
\cline{1-7}
c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
\bottomrule
\end{tabular}
"""
assert result == expected
def test_to_latex_escape(self):
a = 'a'
b = 'b'
test_dict = {u('co^l1'): {a: "a",
b: "b"},
u('co$e^x$'): {a: "a",
b: "b"}}
unescaped_result = DataFrame(test_dict).to_latex(escape=False)
escaped_result = DataFrame(test_dict).to_latex(
) # default: escape=True
unescaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co$e^x$ & co^l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
escaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co\$e\textasciicircumx\$ & co\textasciicircuml1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
assert unescaped_result == unescaped_expected
assert escaped_result == escaped_expected
def test_to_latex_longtable(self, frame):
frame.to_latex(longtable=True)
df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
withindex_result = df.to_latex(longtable=True)
withindex_expected = r"""\begin{longtable}{lrl}
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
assert withindex_result == withindex_expected
withoutindex_result = df.to_latex(index=False, longtable=True)
withoutindex_expected = r"""\begin{longtable}{rl}
\toprule
a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
1 & b1 \\
2 & b2 \\
\end{longtable}
"""
assert withoutindex_result == withoutindex_expected
def test_to_latex_escape_special_chars(self):
special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^',
'\\']
df = DataFrame(data=special_characters)
observed = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
assert observed == expected
def test_to_latex_no_header(self):
# GH 7124
df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
withindex_result = df.to_latex(header=False)
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
withoutindex_result = df.to_latex(index=False, header=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withoutindex_result == withoutindex_expected
def test_to_latex_specified_header(self):
# GH 7124
df = DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
withindex_result = df.to_latex(header=['AA', 'BB'])
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & AA & BB \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
withoutindex_result = df.to_latex(header=['AA', 'BB'], index=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
AA & BB \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withoutindex_result == withoutindex_expected
withoutescape_result = df.to_latex(header=['$A$', '$B$'], escape=False)
withoutescape_expected = r"""\begin{tabular}{lrl}
\toprule
{} & $A$ & $B$ \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert withoutescape_result == withoutescape_expected
with pytest.raises(ValueError):
df.to_latex(header=['A'])
def test_to_latex_decimal(self, frame):
# GH 12031
frame.to_latex()
df = DataFrame({'a': [1.0, 2.1], 'b': ['b1', 'b2']})
withindex_result = df.to_latex(decimal=',')
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1,0 & b1 \\
1 & 2,1 & b2 \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
def test_to_latex_series(self):
s = Series(['a', 'b', 'c'])
withindex_result = s.to_latex()
withindex_expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & a \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
assert withindex_result == withindex_expected
def test_to_latex_bold_rows(self):
# GH 16707
df = pd.DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
observed = df.to_latex(bold_rows=True)
expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
\textbf{0} & 1 & b1 \\
\textbf{1} & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert observed == expected
def test_to_latex_no_bold_rows(self):
# GH 16707
df = pd.DataFrame({'a': [1, 2], 'b': ['b1', 'b2']})
observed = df.to_latex(bold_rows=False)
expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
assert observed == expected
| mit |
scipy/scipy | scipy/signal/_max_len_seq.py | 12 | 4962 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.array(taps) # needed for Cython and Pythran
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because NumPy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-D array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| bsd-3-clause |
MaxInGaussian/GomPlex | applications/digital_drawing/evaluate_features.py | 2 | 5417 | ################################################################################
# Github: https://github.com/MaxInGaussian/GomPlex
# Author: Max W. Y. Lam ([email protected])
################################################################################
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.linear_model import LogisticRegression
from DecisionSystem import DecisionSystem
from sys import path
path.append("../../")
from GomPlex import *
DRAWING_RAW_DATA_PATH = 'drawing_raw_data.csv'
model = DecisionSystem(sample_time=100, use_past=4,
use_gender=True, use_age=True, use_edu_level=True,
show_training_drawings=False, show_predicted_drawings=False)
model.load_drawing_data(DRAWING_RAW_DATA_PATH)
num_ci, num_nci = model.ci.sum(), len(model.ci)-model.ci.sum()
# model.show_velocity_graph('MS0045')
# model.show_direction_graph('MS0045')
def get_eval_from_fpr_tpr(fpr, tpr):
cfs_mat = np.array([[tpr*num_ci, num_ci-tpr*num_ci],
[fpr*num_nci, num_nci-fpr*num_nci]])
accuracy = (cfs_mat[0, 0]+cfs_mat[1, 1])/np.sum(cfs_mat)
precision = 0 if np.sum(cfs_mat[:, 0]) == 0 else\
cfs_mat[0, 0]/np.sum(cfs_mat[:, 0])
sensitivity = 0 if np.sum(cfs_mat[0]) == 0 else\
cfs_mat[0, 0]/np.sum(cfs_mat[0])
specificity = 0 if np.sum(cfs_mat[1]) == 0 else\
cfs_mat[1, 1]/np.sum(cfs_mat[1])
F1 = 0 if precision+sensitivity == 0 else\
2*(precision*sensitivity)/(precision+sensitivity)
print("Sensitivity =", sensitivity)
print("Specificity =", specificity)
print("Accuracy =", accuracy)
print("Precision =", precision)
print("F1 Score =", F1)
AUC, F1, cfs_mat, cis, pred_cis = model.eval_features_for_subjects()
fpr, tpr, thresholds = roc_curve(cis, pred_cis)
AUC = auc(fpr, tpr)
arg = np.argmax(tpr-fpr)
plt.plot(fpr, tpr, color='black', lw=2, linestyle='-', label='GPMC (AUC = %0.3f)' % AUC)
plt.scatter(fpr[arg], tpr[arg], s=50, color='k', marker='x')
print('GPMC:')
get_eval_from_fpr_tpr(fpr[arg], tpr[arg])
lr = ['Number of angles', 'Intersection', 'Closure', 'Rotation', 'Closing-in']
lr_mat = model.df_drawing_data[lr].as_matrix().tolist()
lr_y = model.ci.as_matrix().astype(np.int64)
X, y = [], []
for i, lr_vec in enumerate(lr_mat):
avg_V = model.avg_V[model.df_drawing_data.index[i]]
std_V = model.std_V[model.df_drawing_data.index[i]]
avg_T = model.avg_T[model.df_drawing_data.index[i]]
std_T = model.std_T[model.df_drawing_data.index[i]]
if(not np.any(np.isnan([avg_V, std_V, avg_T, std_T]))):
lr_vec.extend([avg_V, std_V, avg_T, std_T])
X.append(lr_vec)
y.append(lr_y[i])
lr_model = LogisticRegression().fit(X[:int(len(X)*0.65)], y[:int(len(X)*0.65)])
pred_cis_lr = lr_model.predict_proba(X[int(len(X)*0.65):])
fpr_lr, tpr_lr, thresholds_lr = roc_curve(y[int(len(X)*0.65):], pred_cis_lr[:, 1])
AUC_lr = auc(fpr_lr, tpr_lr)
arg_lr = np.argmax(tpr_lr-fpr_lr)
plt.plot(fpr_lr, tpr_lr, color='black', lw=2, linestyle='--', label='Logistic Regression (AUC = %0.3f)' % AUC_lr)
plt.scatter(fpr_lr[arg_lr], tpr_lr[arg_lr], s=50, color='k', marker='x')
print('Logistic Regression:')
get_eval_from_fpr_tpr(fpr_lr[arg_lr], tpr_lr[arg_lr])
caffarra = ['Number of angles', 'Intersection', 'Closure', 'Rotation', 'Closing-in']
caffarra_score = model.df_drawing_data[caffarra].sum(axis=1)
pred_cis_caff = np.array(caffarra_score).ravel()/13
fpr_caff, tpr_caff, thresholds_caff = roc_curve(cis, 1-pred_cis_caff)
AUC_caff = auc(fpr_caff, tpr_caff)
arg_caff = np.argmax(tpr_caff-fpr_caff)
plt.plot(fpr_caff, tpr_caff, color='black', lw=2, linestyle='-.', label='Caffarra\'s Method (AUC = %0.3f)' % AUC_caff)
plt.scatter(fpr_caff[arg_caff], tpr_caff[arg_caff], s=50, color='k', marker='x')
print('Caffarra\'s Method:')
get_eval_from_fpr_tpr(fpr_caff[arg_caff], tpr_caff[arg_caff])
mmse = ['Number of angles', 'Intersection']
mmse_score = np.array(model.df_drawing_data[mmse].sum(axis=1)==8)/1.
fpr_mmse, tpr_mmse, thresholds_mmse = roc_curve(cis, 1-mmse_score)
AUC_mmse = auc(fpr_mmse, tpr_mmse)
arg_mmse = np.argmax(tpr_mmse-fpr_mmse)
plt.plot(fpr_mmse, tpr_mmse, color='black', lw=2, linestyle=':', label='MMSE Method (AUC = %0.3f)' % AUC_mmse)
plt.scatter(fpr_mmse[arg_mmse], tpr_mmse[arg_mmse], s=50, color='k', marker='x')
print('MMSE\'s Method:')
get_eval_from_fpr_tpr(fpr_mmse[arg_mmse], tpr_mmse[arg_mmse])
plt.plot([0, 1], [0, 1], 'k-', label='Random Guessing (AUC = 0.5)', alpha=0.3)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
def plot_confusion_matrix(cm, classes):
    # Render the given confusion matrix with per-cell counts; set
    # normalize=True to show row-normalised fractions instead.
    normalize = False
    cmap = plt.cm.Blues
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title('Confusion Matrix')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
 | bsd-3-clause |
PatrickChrist/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data?
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
InternationalAirlinesGroup/stubo-app | stubo/ext/parse_date.py | 4 | 6447 | """
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
"""
# adapted from https://github.com/pydata/pandas/blob/master/pandas/tseries/tools.py
from datetime import datetime, timedelta
import re
import sys
from StringIO import StringIO
import logging
import dateutil
from dateutil.parser import parse, DEFAULTPARSER
log = logging.getLogger(__name__)
# raise exception if dateutil 2.0 install on 2.x platform
if (sys.version_info[0] == 2 and
dateutil.__version__ == '2.0'): # pragma: no cover
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
# otherwise a 2nd import won't show the message
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _guess_datetime_format(dt_str, parsed_datetime, dayfirst,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
parsed_datetime : result of dateutil.parser.parse
    dayfirst : boolean
        If True, parse dates with the day first, e.g. 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
log.debug('_guess_datetime_format, dt_str={0}'.format(dt_str))
if dt_str_split is None:
return None
if not isinstance(dt_str, basestring):
return None
day_attribute_and_format = (('day',), '%d')
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d'),
(('year',), '%Y'),
(('month',), '%B'),
(('month',), '%b'),
(('month',), '%m'),
day_attribute_and_format,
(('hour',), '%H'),
(('minute',), '%M'),
(('second',), '%S'),
(('microsecond',), '%f'),
(('second', 'microsecond'), '%S.%f'),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
if parsed_datetime is None:
return None
try:
log.debug('dt_str_split(dt_str)')
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
log.debug('split tokens={0}'.format(tokens))
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
# over other formats for that same underlying attribute (IE, month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
if (token_format is None and
tokens[i] == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
found_attrs.update(attrs)
break
log.debug('found_attrs={0}'.format(found_attrs))
log.debug('format_guess={0}'.format(format_guess))
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
# Or just the token separate (IE, the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
if float(tokens[i]) != 0.0:
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
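# Heuristic pattern intended to flag date strings that also carry a time part
# (whitespace- or 'T'-separated, e.g. '2015-01-02 10:30'); note it will also
# match any string containing internal whitespace.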
has_time = re.compile(r'(.+)([\s]|T)+(.+)')
def parse_date_string(date_str, dayfirst=False, yearfirst=True):
"""
Try hard to parse datetime string, leveraging dateutil plus some extras
Parameters
----------
    date_str : date string
    dayfirst : bool
    yearfirst : bool
    Returns
    -------
    datetime, datetime format string (for `strftime` or `strptime`)
    or None if unable to parse the date string
"""
if not isinstance(date_str, basestring):
return None
arg = date_str.upper()
parse_info = DEFAULTPARSER.info
if len(arg) in (7, 8):
mresult = _attempt_monthly(arg)
log.debug('mresult={0}'.format(mresult))
if mresult:
return mresult
parsed_datetime = DEFAULTPARSER.parse(StringIO(str(arg)), dayfirst=dayfirst,
yearfirst=yearfirst, fuzzy=True)
log.debug('parsed_datetime={0}'.format(parsed_datetime))
if parsed_datetime:
date_format = _guess_datetime_format(date_str, parsed_datetime,
dayfirst=dayfirst)
return parsed_datetime, date_format
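# Fallback for short (7-8 character) inputs such as '2014-05' or 'MAY 2014':
# try a handful of month-granularity patterns before handing off to dateutil.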
def _attempt_monthly(val):
pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']
for pat in pats:
try:
ret = datetime.strptime(val, pat)
return ret, pat
except Exception:
pass
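# Minimal usage sketch (illustrative only, not part of the module's original
# surface): prints the parsed datetime and guessed format for a few
# representative inputs. The exact format guesses can vary with the installed
# dateutil version, so treat the output as indicative.
if __name__ == '__main__':
    for example in ('2014-05-21 13:45:00', '21/05/2014', 'MAY 2014'):
        print('{0!r} -> {1!r}'.format(example, parse_date_string(example)))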
| gpl-3.0 |
olafhauk/mne-python | tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py | 18 | 7414 | """
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
See the `FieldTrip website`_ for a caveat regarding
the possible interpretation of "significant" clusters.
"""
# Authors: Denis Engemann <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_adjacency
from mne.viz import plot_compare_evokeds
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for the channel of interest
# ---------------------------------------
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
###############################################################################
# Find the FieldTrip neighbor definition to setup sensor adjacency
# ----------------------------------------------------------------
adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag')
print(type(adjacency)) # it's a sparse matrix!
plt.imshow(adjacency.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
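# A rough sanity check (illustrative only): with the FieldTrip template each
# magnetometer should have a small number of neighbours; the exact counts
# depend on the template, so treat the printed values as indicative.
n_neighbors = np.asarray(adjacency.sum(axis=1)).ravel()
print('Neighbours per sensor: min=%d, max=%d'
      % (n_neighbors.min(), n_neighbors.max()))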
###############################################################################
# Compute permutation statistic
# -----------------------------
#
# How does it work? We use clustering to "bind" together features which are
# similar. Our features are the magnetic fields measured over our sensor
# array at different times. This reduces the multiple comparison problem.
# To compute the actual test-statistic, we first sum all F-values in all
# clusters. We end up with one statistic for each cluster.
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024
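# The few lines below are a self-contained toy illustration (not part of the
# MNE pipeline): they show how an empirical p-value is obtained by ranking an
# observed cluster statistic within a permutation-based null distribution.
# All numbers here are made up for demonstration purposes only.
rng_demo = np.random.RandomState(42)
null_cluster_stats = rng_demo.gamma(shape=2., scale=10., size=1000)
observed_cluster_stat = 80.
p_demo = (np.sum(null_cluster_stats >= observed_cluster_stat) + 1.) / (1000 + 1.)
print('Toy empirical p-value: %.4f' % p_demo)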
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1, buffer_size=None,
adjacency=adjacency)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
###############################################################################
# Note. The same functions work with source estimate. The only differences
# are the origin of the data, the size, and the adjacency definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# organize data for plotting
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at the sensors contributing to the cluster
sig_times = epochs.times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
# plot average test statistic and mark significant sensors
f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)
f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',
vmin=np.min, vmax=np.max, show=False,
colorbar=False, mask_params=dict(markersize=10))
image = ax_topo.images[0]
# create additional axes (for ERF and colorbar)
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel(
'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
if len(ch_inds) > 1:
title += "s (mean)"
plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
colors=colors, linestyles=linestyles, show=False,
split_legend=True, truncate_yaxis='auto')
# plot temporal cluster extent
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
###############################################################################
# Exercises
# ----------
#
# - What is the smallest p-value you can obtain, given the finite number of
# permutations?
# - use an F distribution to compute the threshold by traditional significance
# levels. Hint: take a look at :obj:`scipy.stats.f`
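#
#   One way to sketch this (the degrees of freedom below are an assumption
#   based on the design used in this tutorial, not a prescription)::
#
#       from scipy import stats
#       dfn = len(event_id) - 1
#       dfd = len(epochs) - len(event_id)
#       f_thresh = stats.f.ppf(1. - 0.001, dfn, dfd)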
#
# .. _fieldtrip website:
# http://www.fieldtriptoolbox.org/faq/
# how_not_to_interpret_results_from_a_cluster-based_permutation_test
| bsd-3-clause |
cpcloud/arrow | python/pyarrow/tests/test_convert_builtin.py | 1 | 27951 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from pyarrow.compat import unittest, u # noqa
import pyarrow as pa
import collections
import datetime
import decimal
import itertools
import numpy as np
import six
import pytz
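# (NumPy dtype, matching Arrow type) pairs used to parametrize the integer
# conversion tests below.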
int_type_pairs = [
(np.int8, pa.int8()),
    (np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64())]
np_int_types, _ = zip(*int_type_pairs)
class StrangeIterable:
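    """An iterable that is deliberately not a sequence (it defines only
    __iter__), used to check that pa.array accepts arbitrary iterables."""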
def __init__(self, lst):
self.lst = lst
def __iter__(self):
return self.lst.__iter__()
def check_struct_type(ty, expected):
"""
Check a struct type is as expected, but not taking order into account.
"""
assert pa.types.is_struct(ty)
assert set(ty) == set(expected)
def test_iterable_types():
arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
arr2 = pa.array((0, 1, 2, 3))
assert arr1.equals(arr2)
def test_empty_iterable():
arr = pa.array(StrangeIterable([]))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
def test_limited_iterator_types():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_limited_iterator_size_overflow():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
arr2 = pa.array((0, 1))
assert arr1.equals(arr2)
def test_limited_iterator_size_underflow():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_iterator_without_size():
expected = pa.array((0, 1, 2))
arr1 = pa.array(iter(range(3)))
assert arr1.equals(expected)
# Same with explicit type
arr1 = pa.array(iter(range(3)), type=pa.int64())
assert arr1.equals(expected)
def test_infinite_iterator():
expected = pa.array((0, 1, 2))
arr1 = pa.array(itertools.count(0), size=3)
assert arr1.equals(expected)
# Same with explicit type
arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
assert arr1.equals(expected)
def _as_list(xs):
return xs
def _as_tuple(xs):
return tuple(xs)
def _as_deque(xs):
# deque is a sequence while neither tuple nor list
return collections.deque(xs)
def _as_dict_values(xs):
# a dict values object is not a sequence, just a regular iterable
dct = {k: v for k, v in enumerate(xs)}
return six.viewvalues(dct)
parametrize_with_iterable_types = pytest.mark.parametrize(
"seq", [_as_list, _as_tuple, _as_deque, _as_dict_values])
@parametrize_with_iterable_types
def test_sequence_types(seq):
arr1 = pa.array(seq([1, 2, 3]))
arr2 = pa.array([1, 2, 3])
assert arr1.equals(arr2)
@parametrize_with_iterable_types
def test_sequence_boolean(seq):
expected = [True, None, False, None]
arr = pa.array(seq(expected))
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pa.bool_()
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_numpy_boolean(seq):
expected = [np.bool(True), None, np.bool(False), None]
arr = pa.array(seq(expected))
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pa.bool_()
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_empty_list(seq):
arr = pa.array(seq([]))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
@parametrize_with_iterable_types
def test_nested_lists(seq):
data = [[], [1, 2], None]
arr = pa.array(seq(data))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
# With explicit type
arr = pa.array(seq(data), type=pa.list_(pa.int32()))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int32())
assert arr.to_pylist() == data
@parametrize_with_iterable_types
def test_list_with_non_list(seq):
# List types don't accept non-sequences
with pytest.raises(pa.ArrowTypeError):
pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
@parametrize_with_iterable_types
def test_nested_arrays(seq):
arr = pa.array(seq([np.array([], dtype=int), np.array([1, 2]), None]))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == [[], [1, 2], None]
@parametrize_with_iterable_types
def test_sequence_all_none(seq):
arr = pa.array(seq([None, None]))
assert len(arr) == 2
assert arr.null_count == 2
assert arr.type == pa.null()
assert arr.to_pylist() == [None, None]
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [1, None, 3, None,
np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
arr = pa.array(seq(expected), type=pa_type)
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
# ARROW-2806: numpy.nan is a double value and thus should produce
# a double array.
_, pa_type = np_scalar_pa_type
with pytest.raises(ValueError):
pa.array(seq([np.nan]), type=pa_type, from_pandas=False)
arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
expected = [None]
assert len(arr) == 1
assert arr.null_count == 1
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
# ARROW-2806: numpy.nan is a double value and thus should produce
# a double array.
_, pa_type = np_scalar_pa_type
with pytest.raises(ValueError):
pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)
arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
expected = [[None]]
assert len(arr) == 1
assert arr.null_count == 0
assert arr.type == pa.list_(pa_type)
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_integer_inferred(seq):
expected = [1, None, 3, None]
arr = pa.array(seq(expected))
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pa.int64()
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [np_scalar(1), None, np_scalar(3), None,
np_scalar(np.iinfo(np_scalar).min),
np_scalar(np.iinfo(np_scalar).max)]
arr = pa.array(seq(expected), type=pa_type)
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [np_scalar(1), None, np_scalar(3), None]
if np_scalar != np.uint64:
expected += [np_scalar(np.iinfo(np_scalar).min),
np_scalar(np.iinfo(np_scalar).max)]
else:
# max(uint64) is too large for the inferred int64 type
expected += [0, np.iinfo(np.int64).max]
arr = pa.array(seq(expected))
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa.int64()
assert arr.to_pylist() == expected
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
ty = getattr(pa, "int%d" % bits)()
# XXX ideally would raise OverflowError
with pytest.raises((ValueError, pa.ArrowException)):
pa.array([2 ** (bits - 1)], ty)
with pytest.raises((ValueError, pa.ArrowException)):
pa.array([-2 ** (bits - 1) - 1], ty)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
ty = getattr(pa, "uint%d" % bits)()
# XXX ideally would raise OverflowError
with pytest.raises((ValueError, pa.ArrowException)):
pa.array([2 ** bits], ty)
with pytest.raises((ValueError, pa.ArrowException)):
pa.array([-1], ty)
def test_garbage_collection():
import gc
# Force the cyclic garbage collector to run
gc.collect()
bytes_before = pa.total_allocated_bytes()
pa.array([1, None, 3, None])
gc.collect()
assert pa.total_allocated_bytes() == bytes_before
def test_sequence_double():
data = [1.5, 1, None, 2.5, None, None]
arr = pa.array(data)
assert len(arr) == 6
assert arr.null_count == 3
assert arr.type == pa.float64()
assert arr.to_pylist() == data
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, from_pandas):
data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
arr = pa.array(seq(data), from_pandas=from_pandas)
assert len(arr) == 6
if from_pandas:
assert arr.null_count == 3
else:
assert arr.null_count == 2
assert arr.type == pa.float64()
assert arr.to_pylist()[:4] == data[:4]
if from_pandas:
assert arr.to_pylist()[5] is None
else:
assert np.isnan(arr.to_pylist()[5])
@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
# ARROW-2806
data = np.array([
inner_seq([1., 2.]),
inner_seq([1., 2., 3.]),
inner_seq([np.nan]),
None
])
arr = pa.array(data, from_pandas=from_pandas)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.list_(pa.float64())
if from_pandas:
assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
else:
np.testing.assert_equal(arr.to_pylist(),
[[1., 2.], [1., 2., 3.], [np.nan], None])
def test_sequence_unicode():
data = [u'foo', u'bar', None, u'mañana']
arr = pa.array(data)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.string()
assert arr.to_pylist() == data
def test_sequence_bytes():
u1 = b'ma\xc3\xb1ana'
data = [b'foo',
u1.decode('utf-8'), # unicode gets encoded,
bytearray(b'bar'),
None]
for ty in [None, pa.binary()]:
arr = pa.array(data, type=ty)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.binary()
assert arr.to_pylist() == [b'foo', u1, b'bar', None]
def test_sequence_utf8_to_unicode():
# ARROW-1225
data = [b'foo', None, b'bar']
arr = pa.array(data, type=pa.string())
assert arr[0].as_py() == u'foo'
# test a non-utf8 unicode string
val = (u'mañana').encode('utf-16-le')
with pytest.raises(pa.ArrowInvalid):
pa.array([val], type=pa.string())
def test_sequence_fixed_size_bytes():
data = [b'foof', None, bytearray(b'barb'), b'2346']
arr = pa.array(data, type=pa.binary(4))
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.binary(4)
assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']
def test_fixed_size_bytes_does_not_accept_varying_lengths():
data = [b'foo', None, b'barb', b'2346']
with pytest.raises(pa.ArrowInvalid):
pa.array(data, type=pa.binary(4))
def test_sequence_date():
data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
datetime.date(2040, 2, 26)]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.date64()
assert arr.null_count == 1
assert arr[0].as_py() == datetime.date(2000, 1, 1)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.date(1970, 1, 1)
assert arr[3].as_py() == datetime.date(2040, 2, 26)
def test_sequence_date32():
data = [datetime.date(2000, 1, 1), None]
arr = pa.array(data, type=pa.date32())
data2 = [10957, None]
arr2 = pa.array(data2, type=pa.date32())
for x in [arr, arr2]:
assert len(x) == 2
assert x.type == pa.date32()
assert x.null_count == 1
assert x[0].as_py() == datetime.date(2000, 1, 1)
assert x[1] is pa.NA
# Overflow
data3 = [2**32, None]
with pytest.raises(pa.ArrowException):
pa.array(data3, type=pa.date32())
def test_sequence_timestamp():
data = [
datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
None,
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.timestamp('us')
assert arr.null_count == 1
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
34, 56, 432539)
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
46, 57, 437699)
def test_sequence_numpy_timestamp():
data = [
np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
None,
np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.timestamp('us')
assert arr.null_count == 1
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
34, 56, 432539)
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
46, 57, 437699)
def test_sequence_timestamp_with_unit():
data = [
datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
]
s = pa.timestamp('s')
ms = pa.timestamp('ms')
us = pa.timestamp('us')
ns = pa.timestamp('ns')
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 0)
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123000)
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
arr_ns = pa.array(data, type=ns)
assert len(arr_ns) == 1
assert arr_ns.type == ns
assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
def test_datetime_subclassing():
class MyDate(datetime.date):
pass
data = [
MyDate(2007, 7, 13),
]
date_type = pa.date32()
arr_date = pa.array(data, type=date_type)
assert len(arr_date) == 1
assert arr_date.type == date_type
assert arr_date[0].as_py() == datetime.date(2007, 7, 13)
class MyDatetime(datetime.datetime):
pass
data = [
MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
]
s = pa.timestamp('s')
ms = pa.timestamp('ms')
us = pa.timestamp('us')
ns = pa.timestamp('ns')
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 0)
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123000)
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
arr_ns = pa.array(data, type=ns)
assert len(arr_ns) == 1
assert arr_ns.type == ns
assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
def test_sequence_timestamp_from_int_with_unit():
data = [1]
s = pa.timestamp('s')
ms = pa.timestamp('ms')
us = pa.timestamp('us')
ns = pa.timestamp('ns')
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert repr(arr_s[0]) == "Timestamp('1970-01-01 00:00:01')"
assert str(arr_s[0]) == "1970-01-01 00:00:01"
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert repr(arr_ms[0]) == "Timestamp('1970-01-01 00:00:00.001000')"
assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert repr(arr_us[0]) == "Timestamp('1970-01-01 00:00:00.000001')"
assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"
arr_ns = pa.array(data, type=ns)
assert len(arr_ns) == 1
assert arr_ns.type == ns
assert repr(arr_ns[0]) == "Timestamp('1970-01-01 00:00:00.000000001')"
assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"
with pytest.raises(pa.ArrowException):
class CustomClass():
pass
pa.array([1, CustomClass()], type=ns)
pa.array([1, CustomClass()], type=pa.date32())
pa.array([1, CustomClass()], type=pa.date64())
def test_sequence_nesting_levels():
data = [1, 2, None]
arr = pa.array(data)
assert arr.type == pa.int64()
assert arr.to_pylist() == data
data = [[1], [2], None]
arr = pa.array(data)
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
data = [[1], [2, 3, 4], [None]]
arr = pa.array(data)
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
arr = pa.array(data)
assert arr.type == pa.list_(pa.list_(pa.int64()))
assert arr.to_pylist() == data
# Mixed nesting levels are rejected
with pytest.raises(pa.ArrowInvalid):
pa.array([1, 2, [1]])
with pytest.raises(pa.ArrowInvalid):
pa.array([1, 2, []])
with pytest.raises(pa.ArrowInvalid):
pa.array([[1], [2], [None, [1]]])
def test_sequence_mixed_types_fails():
data = ['a', 1, 2.0]
with pytest.raises(pa.ArrowTypeError):
pa.array(data)
def test_sequence_mixed_types_with_specified_type_fails():
data = ['-10', '-5', {'a': 1}, '0', '5', '10']
type = pa.string()
with pytest.raises(TypeError):
pa.array(data, type=type)
def test_sequence_decimal():
data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
type = pa.decimal128(precision=7, scale=3)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_sequence_decimal_different_precisions():
data = [
decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
]
type = pa.decimal128(precision=13, scale=3)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_sequence_decimal_no_scale():
data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
type = pa.decimal128(precision=10)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_sequence_decimal_negative():
data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
type = pa.decimal128(precision=10, scale=6)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_sequence_decimal_no_whole_part():
data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
type = pa.decimal128(precision=7, scale=7)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_sequence_decimal_large_integer():
data = [decimal.Decimal('-394029506937548693.42983'),
decimal.Decimal('32358695912932.01033')]
type = pa.decimal128(precision=23, scale=5)
arr = pa.array(data, type=type)
assert arr.to_pylist() == data
def test_range_types():
arr1 = pa.array(range(3))
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_empty_range():
arr = pa.array(range(0))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
def test_structarray():
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = [
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
]
pylist = arr.to_pylist()
assert pylist == expected, (pylist, expected)
def test_struct_from_dicts():
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
arr = pa.array([], type=ty)
assert arr.to_pylist() == []
data = [{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == data
# With omitted values
data = [{'a': 5, 'c': True},
None,
{},
{'a': None, 'b': 'bar'}]
arr = pa.array(data, type=ty)
expected = [{'a': 5, 'b': None, 'c': True},
None,
{'a': None, 'b': None, 'c': None},
{'a': None, 'b': 'bar', 'c': None}]
assert arr.to_pylist() == expected
def test_struct_from_tuples():
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [(5, 'foo', True),
(6, 'bar', False)]
expected = [{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == expected
# With omitted values
data = [(5, 'foo', None),
None,
(6, None, False)]
expected = [{'a': 5, 'b': 'foo', 'c': None},
None,
{'a': 6, 'b': None, 'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == expected
# Invalid tuple size
for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
with pytest.raises(ValueError, match="(?i)tuple size"):
pa.array([tup], type=ty)
def test_struct_from_mixed_sequence():
# It is forbidden to mix dicts and tuples when initializing a struct array
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [(5, 'foo', True),
{'a': 6, 'b': 'bar', 'c': False}]
with pytest.raises(TypeError):
pa.array(data, type=ty)
def test_struct_from_dicts_inference():
expected_type = pa.struct([pa.field('a', pa.int64()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [{'a': 5, 'b': u'foo', 'c': True},
{'a': 6, 'b': u'bar', 'c': False}]
arr = pa.array(data)
check_struct_type(arr.type, expected_type)
assert arr.to_pylist() == data
# With omitted values
data = [{'a': 5, 'c': True},
None,
{},
{'a': None, 'b': u'bar'}]
expected = [{'a': 5, 'b': None, 'c': True},
None,
{'a': None, 'b': None, 'c': None},
{'a': None, 'b': u'bar', 'c': None}]
arr = pa.array(data)
check_struct_type(arr.type, expected_type)
assert arr.to_pylist() == expected
# Nested
expected_type = pa.struct([
pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
pa.field('ab', pa.bool_())])),
pa.field('b', pa.string())])
data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
{'a': {'aa': None, 'ab': False}, 'b': None},
{'a': None, 'b': 'bar'}]
arr = pa.array(data)
assert arr.to_pylist() == data
# Edge cases
arr = pa.array([{}])
assert arr.type == pa.struct([])
assert arr.to_pylist() == [{}]
# Mixing structs and scalars is rejected
with pytest.raises(pa.ArrowInvalid):
pa.array([1, {'a': 2}])
def test_structarray_from_arrays_coerce():
# ARROW-1706
ints = [None, 2, 3]
strs = [u'a', None, u'c']
bools = [True, False, None]
ints_nonnull = [1, 2, 3]
arrays = [ints, strs, bools, ints_nonnull]
result = pa.StructArray.from_arrays(arrays,
['ints', 'strs', 'bools',
'int_nonnull'])
expected = pa.StructArray.from_arrays(
[pa.array(ints, type='int64'),
pa.array(strs, type='utf8'),
pa.array(bools),
pa.array(ints_nonnull, type='int64')],
['ints', 'strs', 'bools', 'int_nonnull'])
with pytest.raises(ValueError):
pa.StructArray.from_arrays(arrays)
assert result.equals(expected)
def test_decimal_array_with_none_and_nan():
values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]
array = pa.array(values)
assert array.type == pa.decimal128(4, 3)
assert array.to_pylist() == values[:2] + [None, None]
array = pa.array(values, type=pa.decimal128(10, 4))
assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]
@pytest.mark.parametrize('tz,name', [
(pytz.FixedOffset(90), '+01:30'),
(pytz.FixedOffset(-90), '-01:30'),
(pytz.utc, 'UTC'),
(pytz.timezone('America/New_York'), 'America/New_York')
])
def test_timezone_string(tz, name):
assert pa.lib.tzinfo_to_string(tz) == name
assert pa.lib.string_to_tzinfo(name) == tz
| apache-2.0 |
jiegec/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 78 | 6117 | #!/usr/bin/env python
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g, volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can ref the table by it's name and the data associated
# with a given kernel in name_reg by it's name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
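    # When --percent is given, cache the reference table's timings (mean, min
    # or max, depending on --plot) so every other table can later be expressed
    # as a percentage improvement over that reference.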
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = xrange(len(name_reg))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/(M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
arbazkhan002/datasketch | benchmark/b_bit_minhash_benchmark.py | 3 | 2774 | '''
Benchmarking the performance and accuracy of b-bi MinHash.
'''
import time, logging, random
logging.basicConfig(level=logging.INFO)
import pyhash
import numpy as np
from datasketch.minhash import MinHash
from datasketch.b_bit_minhash import bBitMinHash
from similarity_benchmark import _get_exact, _gen_data,\
Hash, _b_bit_minhash_jaccard
def _run_minhash(A, B, data, seed, bs, num_perm):
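    # Build MinHash sketches (num_perm permutations) for the two index ranges
    # A and B using a seeded murmur3 hash, then return the full-precision
    # MinHash Jaccard estimate followed by one b-bit estimate per value in bs.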
(a_start, a_end), (b_start, b_end) = A, B
hasher = pyhash.murmur3_32()
m1 = MinHash(num_perm=num_perm, hashobj=Hash)
m2 = MinHash(num_perm=num_perm, hashobj=Hash)
for i in xrange(a_start, a_end):
m1.update(hasher(data[i], seed=seed))
for i in xrange(b_start, b_end):
m2.update(hasher(data[i], seed=seed))
return [m1.jaccard(m2)] + \
[_b_bit_minhash_jaccard(m1, m2, b) for b in bs]
def _run_test(A, B, data, n, bs, num_perm):
logging.info("Run tests with A = (%d, %d), B = (%d, %d), n = %d"
% (A[0], A[1], B[0], B[1], n))
runs = np.array([_run_minhash(A, B, data, i, bs, num_perm)
for i in xrange(n)]).T
return runs
def run_full_tests(attr_pairs, data, n, bs, num_perm):
return [_run_test(A, B, data, n, bs, num_perm)
for A, B in attr_pairs]
def plot(result, bs, exact_sims, num_perm, bins, save):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
num_row = 1
num_col = len(result)
basesize = 5
size = (basesize*num_col, basesize*num_row)
fig, axes = plt.subplots(num_row, num_col, sharey=True,
sharex=True, figsize=size)
for i, runs in enumerate(result):
minhash = sorted(runs[0])
bbits = [sorted(r) for r in runs[1:]]
exact_sim = exact_sims[i]
ax = axes[i]
l = ax.plot(minhash, label='MinHash')
for b, run in zip(bs, bbits):
l = ax.plot(run, label='%d-bit' % b)
ax.axhline(exact_sim, color='black', linestyle='--', label='Exact')
ax.set_title("%d perm funcs, exact = %.2f" % (num_perm, exact_sim))
ax.grid()
ax.set_xlabel("Runs with random hash functions")
if i == 0:
ax.set_ylabel('Jaccard')
if i == num_col - 1:
ax.legend(loc='lower right')
fig.savefig(save)
if __name__ == "__main__":
data = _gen_data(5000)
attr_pairs = [((0, 3000), (2000, 5000)),
((0, 3500), (1500, 5000)),
((0, 4500), (500, 5000))]
num_perm = 128
bs = [1, 2, 3]
n = 100
save = "b_bit_minhash_benchmark.png"
bins = [i*0.02 for i in range(51)]
exact_sims = [_get_exact(A, B) for A, B in attr_pairs]
result = run_full_tests(attr_pairs, data, n, bs, num_perm)
plot(result, bs, exact_sims, num_perm, bins, save)
| mit |
etkirsch/scikit-learn | sklearn/metrics/tests/test_classification.py | 83 | 49782 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situtation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# are separated from the 1 values, so it appears that we've
# Correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
# counter part is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
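        # (the default offsets 3/8, 4/8, 2.5/8 are tiled and truncated below
        #  so that there is one y-offset per scatter point shown in the legend)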
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == expand, set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
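        # Schematically, for ncol=2 and three entries:
        #
        #   HPacker (legend_box)
        #   +-- VPacker (column 1)
        #   |     +-- HPacker(DrawingArea(handle), TextArea(label))
        #   |     +-- HPacker(DrawingArea(handle), TextArea(label))
        #   +-- VPacker (column 2)
        #         +-- HPacker(DrawingArea(handle), TextArea(label))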
text_list = [] # the list of text instances
        handle_list = [] # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
        # manually set its transform to self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
                # attribute. But other handle types do not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
        # (num_smallcol) columns will have (nrows) rows.
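        # e.g. 8 handles with ncol=3: divmod(8, 3) gives nrows=2 and
        # num_largecol=2, so num_smallcol=1 and the columns hold
        # 3, 3 and 2 entries respectively.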
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
        because the paddings, the handle size, etc. depend on the dpi
of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
        Returns a three-element list.
        First element is a list of (x, y) vertices (in
        display-coordinates) covered by the lines and line
        collections of the parent axes.
        Second element is a list of bounding boxes for all the patches
        of the parent axes.
        Third element is a list of line paths (in display coordinates)
        for the lines of the parent axes.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
| gpl-3.0 |
fastread/src | src/util/mar.py | 1 | 21105 | from __future__ import print_function, division
try:
import cPickle as pickle
except:
import pickle
from pdb import set_trace
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from collections import Counter
from sklearn import svm
from sklearn import linear_model
import matplotlib.pyplot as plt
import time
import os
import pandas as pd
class MAR(object):
def __init__(self):
self.fea_num = 4000
self.step = 10
self.enough = 30
self.kept=50
self.atleast=100
def create(self,filename):
self.filename=filename
self.name=self.filename.split(".")[0]
self.flag=True
self.hasLabel=True
self.record={"x":[],"pos":[]}
self.body={}
self.est = []
self.est_num = 0
self.last_pos=0
self.last_neg=0
try:
## if model already exists, load it ##
self = self.load()
except:
## otherwise read from file ##
try:
self.loadfile()
self.preprocess()
self.save()
except:
## cannot find file in workspace ##
self.flag=False
self.enable_est=False
return self
    ### Deprecated
### Use previous knowledge, labeled only
def create_old(self, filename):
with open("../workspace/coded/" + str(filename), "r") as csvfile:
content = [x for x in csv.reader(csvfile, delimiter=',')]
fields = ["Document Title", "Abstract", "Year", "PDF Link", "code", "time"]
header = content[0]
ind0 = header.index("code")
self.last_pos = len([c[ind0] for c in content[1:] if c[ind0] == "yes"])
self.last_neg = len([c[ind0] for c in content[1:] if c[ind0] == "no"])
for field in fields:
ind = header.index(field)
if field == "time":
self.body[field].extend([float(c[ind]) for c in content[1:] if c[ind0] != "undetermined"])
else:
self.body[field].extend([c[ind] for c in content[1:] if c[ind0] != "undetermined"])
try:
ind = header.index("label")
self.body["label"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
except:
self.body["label"].extend(["unknown"] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
try:
ind = header.index("fixed")
self.body["fixed"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
except:
self.body["fixed"].extend([0] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
self.preprocess()
self.save()
def loadfile(self):
self.body = pd.read_csv("../workspace/data/" + str(self.filename),encoding = "ISO-8859-1")
fields = ["Document Title", "Abstract", "Year", "PDF Link"]
columns = self.body.columns
n = len(self.body)
for field in fields:
if field not in columns:
self.body[field] = [""]*n
if "label" not in columns:
self.body["label"] = ["unknown"]*n
if "code" not in columns:
self.body["code"] = ["undetermined"]*n
if "time" not in columns:
self.body["time"] = [0]*n
if "fixed" not in columns:
self.body["fixed"] = [0]*n
self.body = self.body.fillna("")
return
def export_feature(self):
with open("../workspace/coded/feature_" + str(self.name) + ".csv", "wb") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for i in range(self.csr_mat.shape[0]):
for j in range(self.csr_mat.indptr[i],self.csr_mat.indptr[i+1]):
csvwriter.writerow([i+1,self.csr_mat.indices[j]+1,self.csr_mat.data[j]])
return
def get_numbers(self):
total = len(self.body["code"]) - self.last_pos - self.last_neg
pos = Counter(self.body["code"])["yes"] - self.last_pos
neg = Counter(self.body["code"])["no"] - self.last_neg
try:
tmp=self.record['x'][-1]
except:
tmp=-1
if int(pos+neg)>tmp:
self.record['x'].append(int(pos+neg))
self.record['pos'].append(int(pos))
self.pool = np.where(np.array(self.body['code']) == "undetermined")[0]
self.labeled = list(set(range(len(self.body['code']))) - set(self.pool))
return pos, neg, total
def export(self):
fields = ["Document Title", "Abstract", "Year", "PDF Link", "label", "code","time"]
body = self.body[fields]
body.sort_values(by=['time'], ascending=False)
yes = body.loc[body['code'] == 'yes']
no = body.loc[body['code'] == 'no']
und = body.loc[body['code'] == 'undetermined']
out = pd.concat([yes, no, und], ignore_index=True)
out.to_csv("../workspace/coded/" + str(self.name) + ".csv",columns=fields,index=False)
return
def preprocess(self):
### Combine title and abstract for training ##################
content = [str(self.body["Document Title"][index]) + " " + str(self.body["Abstract"][index]) for index in range(len(self.body))]
#######################################################
### Feature selection by tfidf in order to keep vocabulary ###
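        # (the self.fea_num terms with the largest summed tf-idf weight over
        #  the whole corpus are kept as the vocabulary)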
tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=True, smooth_idf=False,
sublinear_tf=False,decode_error="ignore")
tfidf = tfidfer.fit_transform(content)
weight = tfidf.sum(axis=0).tolist()[0]
kept = np.argsort(weight)[-self.fea_num:]
self.voc = np.array(list(tfidfer.vocabulary_.keys()))[np.argsort(list(tfidfer.vocabulary_.values()))][kept]
##############################################################
### Term frequency as feature, L2 normalization ##########
tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=u'l2', use_idf=False,
vocabulary=self.voc,decode_error="ignore")
# tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False,
# vocabulary=self.voc,decode_error="ignore")
self.csr_mat=tfer.fit_transform(content)
########################################################
return
## save model ##
def save(self):
with open("memory/"+str(self.name)+".pickle","wb") as handle:
pickle.dump(self,handle)
## load model ##
def load(self):
with open("memory/" + str(self.name) + ".pickle", "rb") as handle:
tmp = pickle.load(handle)
return tmp
def estimate_curve(self, clf, reuse=False, num_neg=0):
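        # Estimate how many relevant ("yes") documents remain in the pool:
        # fit a logistic model on the SVM decision values of the labeled
        # data, repeatedly draw pseudo-positives from the unlabeled pool
        # (prob_sample below) and refit, until the estimated count stops
        # changing.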
def prob_sample(probs):
order = np.argsort(probs)[::-1]
count = 0
can = []
sample = []
for i, x in enumerate(probs[order]):
count = count + x
can.append(order[i])
if count >= 1:
# sample.append(np.random.choice(can,1)[0])
sample.append(can[0])
count -= 1
can = []
return sample
poses = np.where(np.array(self.body['code']) == "yes")[0]
negs = np.where(np.array(self.body['code']) == "no")[0]
poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
###############################################
# prob = clf.predict_proba(self.csr_mat)[:,:1]
prob1 = clf.decision_function(self.csr_mat)
prob = np.array([[x] for x in prob1])
# prob = self.csr_mat
y = np.array([1 if x == 'yes' else 0 for x in self.body['code']])
y0 = np.copy(y)
if len(poses) and reuse:
all = list(set(poses) | set(negs) | set(self.pool))
else:
all = range(len(y))
pos_num_last = Counter(y0)[1]
lifes = 3
life = lifes
while (True):
C = Counter(y[all])[1]/ num_neg
es = linear_model.LogisticRegression(penalty='l2', fit_intercept=True, C=C)
es.fit(prob[all], y[all])
pos_at = list(es.classes_).index(1)
pre = es.predict_proba(prob[self.pool])[:, pos_at]
y = np.copy(y0)
sample = prob_sample(pre)
for x in self.pool[sample]:
y[x] = 1
pos_num = Counter(y)[1]
if pos_num == pos_num_last:
life = life - 1
if life == 0:
break
else:
life = lifes
pos_num_last = pos_num
esty = pos_num - self.last_pos
pre = es.predict_proba(prob)[:, pos_at]
return esty, pre
## Train model ##
def train(self,pne=True,weighting=True):
clf = svm.SVC(kernel='linear', probability=True, class_weight='balanced') if weighting else svm.SVC(kernel='linear', probability=True)
poses = np.where(np.array(self.body['code']) == "yes")[0]
negs = np.where(np.array(self.body['code']) == "no")[0]
left = poses
decayed = list(left) + list(negs)
unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
try:
unlabeled = np.random.choice(unlabeled,size=np.max((len(decayed),2*len(left),self.atleast)),replace=False)
except:
pass
if not pne:
unlabeled=[]
labels=np.array([x if x!='undetermined' else 'no' for x in self.body['code']])
all_neg=list(negs)+list(unlabeled)
sample = list(decayed) + list(unlabeled)
clf.fit(self.csr_mat[sample], labels[sample])
## aggressive undersampling ##
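        # Aggressive undersampling: once "enough" positives have been coded,
        # retrain on all positives plus only the len(left) negatives that the
        # current model places farthest on the "no" side of the decision plane.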
if len(poses)>=self.enough:
train_dist = clf.decision_function(self.csr_mat[all_neg])
pos_at = list(clf.classes_).index("yes")
if pos_at:
train_dist=-train_dist
negs_sel = np.argsort(train_dist)[::-1][:len(left)]
sample = list(left) + list(np.array(all_neg)[negs_sel])
clf.fit(self.csr_mat[sample], labels[sample])
elif pne:
train_dist = clf.decision_function(self.csr_mat[unlabeled])
pos_at = list(clf.classes_).index("yes")
if pos_at:
train_dist = -train_dist
unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
clf.fit(self.csr_mat[sample], labels[sample])
uncertain_id, uncertain_prob = self.uncertain(clf)
certain_id, certain_prob = self.certain(clf)
if self.enable_est:
if self.last_pos>0 and len(poses)-self.last_pos>0:
self.est_num, self.est = self.estimate_curve(clf, reuse=True, num_neg=len(sample)-len(left))
else:
self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
else:
return uncertain_id, uncertain_prob, certain_id, certain_prob, clf
## reuse
def train_reuse(self,pne=True):
pne=True
clf = svm.SVC(kernel='linear', probability=True)
poses = np.where(np.array(self.body['code']) == "yes")[0]
negs = np.where(np.array(self.body['code']) == "no")[0]
left = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
if len(left)==0:
return [], [], self.random(), []
decayed = list(left) + list(negs)
unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
try:
unlabeled = np.random.choice(unlabeled, size=np.max((len(decayed), self.atleast)), replace=False)
except:
pass
if not pne:
unlabeled = []
labels = np.array([x if x != 'undetermined' else 'no' for x in self.body['code']])
all_neg = list(negs) + list(unlabeled)
sample = list(decayed) + list(unlabeled)
clf.fit(self.csr_mat[sample], labels[sample])
## aggressive undersampling ##
if len(poses) >= self.enough:
train_dist = clf.decision_function(self.csr_mat[all_neg])
pos_at = list(clf.classes_).index("yes")
if pos_at:
train_dist=-train_dist
negs_sel = np.argsort(train_dist)[::-1][:len(left)]
sample = list(left) + list(np.array(all_neg)[negs_sel])
clf.fit(self.csr_mat[sample], labels[sample])
elif pne:
train_dist = clf.decision_function(self.csr_mat[unlabeled])
pos_at = list(clf.classes_).index("yes")
if pos_at:
train_dist = -train_dist
unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
clf.fit(self.csr_mat[sample], labels[sample])
uncertain_id, uncertain_prob = self.uncertain(clf)
certain_id, certain_prob = self.certain(clf)
if self.enable_est:
self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
else:
return uncertain_id, uncertain_prob, certain_id, certain_prob, clf
## Get suspecious codes
def susp(self,clf):
thres_pos = 1
thres_neg = 0.5
length_pos = 10
length_neg = 10
poses = np.where(np.array(self.body['code']) == "yes")[0]
negs = np.where(np.array(self.body['code']) == "no")[0]
# poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
# negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
poses = np.array(poses)[np.where(np.array(self.body['fixed'])[poses] == 0)[0]]
negs = np.array(negs)[np.where(np.array(self.body['fixed'])[negs] == 0)[0]]
if len(poses)>0:
pos_at = list(clf.classes_).index("yes")
prob_pos = clf.predict_proba(self.csr_mat[poses])[:,pos_at]
# se_pos = np.argsort(prob_pos)[:length_pos]
se_pos = np.argsort(prob_pos)
# se_pos = [s for s in se_pos if prob_pos[s]<thres_pos]
sel_pos = poses[se_pos]
probs_pos = prob_pos[se_pos]
else:
sel_pos = np.array([])
probs_pos = np.array([])
if len(negs)>0:
if clf:
neg_at = list(clf.classes_).index("no")
prob_neg = clf.predict_proba(self.csr_mat[negs])[:,neg_at]
# se_neg = np.argsort(prob_neg)[:length_neg]
se_neg = np.argsort(prob_neg)
# se_neg = [s for s in se_neg if prob_neg[s]<thres_neg]
sel_neg = negs[se_neg]
probs_neg = prob_neg[se_neg]
else:
sel_neg = negs
probs_neg = np.array([])
else:
sel_neg = np.array([])
probs_neg = np.array([])
return sel_pos, probs_pos, sel_neg, probs_neg
## BM25 ##
def BM25(self,query):
b=0.75
k1=1.5
### Combine title and abstract for training ###########
content = [str(self.body["Document Title"][index]) + " " + str(self.body["Abstract"][index]) for index in
range(len(self.body["Document Title"]))]
#######################################################
### Feature selection by tfidf in order to keep vocabulary ###
tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False, smooth_idf=False,
sublinear_tf=False, decode_error="ignore")
tf = tfidfer.fit_transform(content)
d_avg = np.mean(np.sum(tf, axis=1))
score = {}
for word in query:
score[word]=[]
id= tfidfer.vocabulary_[word]
df = sum([1 for wc in tf[:,id] if wc>0])
idf = np.log((len(content)-df+0.5)/(df+0.5))
for i in range(len(content)):
                score[word].append(idf*tf[i,id]/(tf[i,id]+k1*((1-b)+b*np.sum(tf[i],axis=1)[0,0]/d_avg)))
self.bm = np.sum(list(score.values()),axis=0)
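    # For reference, the per-term score accumulated above follows the standard
    # Okapi BM25 form with k1 = 1.5 and b = 0.75:
    #     idf(t)      = ln((N - df(t) + 0.5) / (df(t) + 0.5))
    #     score(d, t) = idf(t) * tf(t, d) / (tf(t, d) + k1 * (1 - b + b * |d| / avgdl))
    # self.bm is the sum of score(d, t) over the query terms t.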
def BM25_get(self):
ids = self.pool[np.argsort(self.bm[self.pool])[::-1][:self.step]]
scores = self.bm[ids]
return ids, scores
## Get certain ##
def certain(self,clf):
pos_at = list(clf.classes_).index("yes")
if len(self.pool)==0:
return [],[]
prob = clf.predict_proba(self.csr_mat[self.pool])[:,pos_at]
order = np.argsort(prob)[::-1][:self.step]
return np.array(self.pool)[order],np.array(prob)[order]
## Get uncertain ##
def uncertain(self,clf):
pos_at = list(clf.classes_).index("yes")
if len(self.pool)==0:
return [],[]
prob = clf.predict_proba(self.csr_mat[self.pool])[:, pos_at]
train_dist = clf.decision_function(self.csr_mat[self.pool])
order = np.argsort(np.abs(train_dist))[:self.step] ## uncertainty sampling by distance to decision plane
# order = np.argsort(np.abs(prob-0.5))[:self.step] ## uncertainty sampling by prediction probability
return np.array(self.pool)[order], np.array(prob)[order]
## Get random ##
def random(self):
return np.random.choice(self.pool,size=np.min((self.step,len(self.pool))),replace=False)
## Format ##
def format(self,id,prob=[]):
result=[]
for ind,i in enumerate(id):
tmp = {key: str(self.body[key][i]) for key in self.body}
tmp["id"]=str(i)
if prob!=[]:
tmp["prob"]=prob[ind]
result.append(tmp)
return result
## Code candidate studies ##
def code(self,id,label):
if self.body['code'][id] == label:
self.body['fixed'][id] = 1
self.body["code"][id] = label
self.body["time"][id] = time.time()
## Plot ##
def plot(self):
font = {'family': 'normal',
'weight': 'bold',
'size': 20}
plt.rc('font', **font)
paras = {'lines.linewidth': 5, 'legend.fontsize': 20, 'axes.labelsize': 30, 'legend.frameon': False,
'figure.autolayout': True, 'figure.figsize': (16, 8)}
plt.rcParams.update(paras)
if len(self.labeled)<=0:
return
fig = plt.figure()
order = np.argsort(np.array(self.body['time'])[self.labeled])
seq = np.array(self.body['code'])[np.array(self.labeled)[order]]
counter = 0
rec = [0]
for s in seq:
if s=='yes':
counter+=1
rec.append(counter)
plt.plot(range(len(rec)), rec)
plt.ylabel("Relevant Found")
plt.xlabel("Documents Reviewed")
name=self.name+ "_" + str(int(time.time()))+".png"
dir = "./static/image"
for file in os.listdir(dir):
os.remove(os.path.join(dir, file))
plt.savefig("./static/image/" + name)
plt.close(fig)
return name
def get_allpos(self):
return len([1 for c in self.body["label"] if c=="yes"])-self.last_pos
## Restart ##
def restart(self):
os.remove("./memory/"+self.name+".pickle")
## Get missed relevant docs ##
def get_rest(self):
rest=[x for x in range(len(self.body['label'])) if self.body['label'][x]=='yes' and self.body['code'][x]!='yes']
rests={}
# fields = ["Document Title", "Abstract", "Year", "PDF Link"]
fields = ["Document Title"]
for r in rest:
rests[r]={}
for f in fields:
rests[r][f]=self.body[f][r]
return rests
def latest_labeled(self):
order = np.argsort(np.array(self.body['time'])[self.labeled])[::-1]
return np.array(self.labeled)[order] | mit |
mattjml/wood_cylinder_cut | cut.py | 1 | 5717 | import numpy as np
from math import pi, tan, cos, sin, sqrt
import sys
import argparse
render = True
try:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
except:
render = False
parser = argparse.ArgumentParser(description=\
"Calculates cutting path around cylinder for certain angled cut.")
parser.add_argument("-r", "--radius", type=int, default=100,
help="Radius of the cylinder in mm")
parser.add_argument("-a", "--angle", type=int, default=45,
help="Angle of the cut in degrees from the cylinder axis")
parser.add_argument("-i", "--interval", type=float, default=.5,
help="Cylinder intersection interval in proportion of circumference (0.0-1.0)")
parser.add_argument('--display', dest='display', action='store_true',
help="Render cut")
parser.add_argument('--no-display', dest='display', action='store_false',
help="Do not render cut")
parser.set_defaults(display=True)
parser.add_argument("-f", "--file", type=str, default='cut.csv',
help="CSV file to write into cut mark positions (around cylinder and along)")
args = parser.parse_args()
radius = args.radius
assert radius > 15, "Radius must be greater than 15 mm."
angle = args.angle
assert 90 >= angle > 0, "Angle must be between 0 and 90 degrees."
angle = (angle * pi) / 180
interval = args.interval
assert 0.25 >= interval >= 0.005, "Interval must be <= 0.25 and >= 0.005"
render = render and args.display
filename = args.file
assert len(filename) > 0, "Filename must be at least one character long"
circumference = (int)(radius * 2 * pi)
interval = circumference * interval
cyl_length = 2 * radius / tan(angle)
cut_length = 2 * radius / sin(angle)
print("Calculating {0} degree cut of {1}mm radius cylinder. "
"Approximating at {2} mm arc intervals".format(args.angle, radius, interval))
def rotation_matrix(axis,theta):
'''Create a rotation matrix for a theta radian
rotation around the axis given.'''
axis = axis/sqrt(np.dot(axis,axis))
a = cos(theta/2)
b,c,d = -axis*sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
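# Hedged, illustrative self-check (a hypothetical helper, never called by this
# script): with the sign convention used above (b, c, d = -axis*sin(theta/2)),
# rotating the x unit vector by pi/2 about the z axis yields -y, and the
# returned matrix is orthogonal with determinant +1.
def _rotation_matrix_selfcheck():
    R = rotation_matrix(np.array([0, 0, 1]), pi / 2)
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    assert np.allclose(np.linalg.det(R), 1.0)
    assert np.allclose(np.dot(R, np.array([1.0, 0.0, 0.0])),
                       np.array([0.0, -1.0, 0.0]))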
def vertical_plane_normal(p1,p2):
'''Compute a normal to the cutting plane'''
p3 = p1 + [0,0,1]
return np.cross(p2-p1,p3-p1)
# Approximate cylinder with parallel lines
lines_cylinder = []
for i in range(0,int(circumference/interval)):
''' Builds a cylinder intersection approximated as a set of parallel lines
each separated by an arc of length 'interval' '''
theta = (2 * pi) * (i / (circumference/interval))
rotmat = rotation_matrix(np.array([0, 1, 0]), -theta)
lines_cylinder.append(np.dot(rotmat, np.array([0, -cyl_length/2, radius])))
lines_cylinder.append(np.dot(rotmat, np.array([0, cyl_length/2, radius])))
# Create cutting plane (a line will do for now)
rotmat = rotation_matrix(np.array([0,0,1]),angle)
cutting_line_st = np.dot(rotmat, np.array([0, -cut_length/2, 0]))
cutting_line_end = np.dot(rotmat, np.array([0, cut_length/2, 0]))
# Calculate cutting plane/cylinder intersection points.
# Only computes the first 180 degrees as the other 180
# is just a mirror of it.
ixs = []
for i in range(0, len(lines_cylinder), 2):
N = np.array(vertical_plane_normal(lines_cylinder[i], lines_cylinder[i+1]))
ix = cutting_line_st + (np.dot(N, lines_cylinder[i] - cutting_line_st) /
np.dot(N, cutting_line_end - cutting_line_st)) * (cutting_line_end - cutting_line_st)
ix = [lines_cylinder[i][0], ix[1], lines_cylinder[i][2]];
ixs.append(ix)
# Flatten cylinder intersections to give cuts on a 2D plane.
# These can be applied to the real cylinder by wrapping
# this 2D plane around the cylinder. The best way to do this
# is either by printing (to correct scale) the markers and
# wrapping the 2D paper around the cylinder or drawing these
# marks on graph paper and wrapping this around the cylinder.
ixs_flat = []
for i in range(int(len(ixs)/2)):
point = [i * interval, ixs[i][1]]
ixs_flat.append(point)
for i in range(int(len(ixs)/2)):
point = [circumference/2 + (i * interval), - ixs[i][1]]
ixs_flat.append(point)
f4 = np.poly1d(np.polyfit([ix[0] for ix in ixs_flat] , [ix[1] for ix in ixs_flat], 8))
xp = np.linspace(0, circumference, 100)
if render:
# Render 3D cut
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.axis('equal')
for i in range(0, len(lines_cylinder), 2):
l_st = lines_cylinder[i]
l_en = lines_cylinder[i+1]
ax.plot([l_st[0],l_en[0]], [l_st[1],l_en[1]],zs=[l_st[2],l_en[2]])
ax.plot([cutting_line_st[0], cutting_line_end[0]], [cutting_line_st[1], cutting_line_end[1]], [cutting_line_st[2],cutting_line_end[2]])
ax.scatter([ix[0] for ix in ixs], [ix[1] for ix in ixs], zs=[ix[2] for ix in ixs])
ax.set_ylabel('Cylinder Axis (mm)')
ax.set_xlabel('mm')
ax.set_zlabel('mm')
plt.show()
# Render cut marker positions
fig = plt.plot([ix[0] for ix in ixs_flat], [ix[1] for ix in ixs_flat], '.', xp, f4(xp), '-')
plt.ylim(min([ix[1] for ix in ixs_flat]), max([ix[1] for ix in ixs_flat]))
plt.xlabel('Around the Cylinder (mm)')
plt.ylabel('Along the Cylinder (mm)')
plt.title('Unwrapped cylinder cut marker positions (printed and wrapped around cylinder).')
plt.axis('equal')
plt.show()
# Write cut markers to file
print("Writing cut marker positions to {}".format(filename))
file = open(filename, 'w')
file.write("arc pos (mm), length pos (mm)")
for ix in ixs_flat:
file.write("{0[0]:.3f}, {0[1]:.3f}\n".format(ix))
file.close()
print("Finished writing to file")
| apache-2.0 |
jkimlab/mySyntenyPortal | src/third_party/bedtools/test/fisher/plot.py | 11 | 1122 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
fig, axs = plt.subplots(3, figsize=(4, 12))
df = pd.read_csv(sys.argv[1])
axs[0].scatter(df.fisher, df.shuffled, s=4)
axs[0].set_xlim(0, 1)
axs[0].set_ylim(0, 1)
axs[0].set_xlabel('fisher p-value')
axs[0].set_ylabel('shuffled p-value')
axs[0].plot([0, 1], [0, 1], ls='--')
x = -np.log10(df.fisher)
y = -np.log10(df.shuffled)
m = int(max(x.max(), y.max())) + 1
axs[1].scatter(x, y, s=4)
axs[1].set_xlim(0, m)
axs[1].set_ylim(0, m)
axs[1].set_xlabel('-log10(fisher p-value)')
axs[1].set_ylabel('-log10(shuffled p-value)')
axs[1].plot([0, m], [0, m], ls='--')
x = -np.log10(1 - np.minimum(1-1e-6, df.fisher))
y = -np.log10(1 - np.minimum(1-1e-6, df.shuffled))
m = int(max(x.max(), y.max())) + 1
axs[2].scatter(x, y, s=4)
axs[2].set_xlim(0, m)
axs[2].set_ylim(0, m)
axs[2].set_xlabel('-log10(1 - fisher p-value)')
axs[2].set_ylabel('-log10(1 - shuffled p-value)')
axs[2].plot([0, m], [0, m], ls='--')
plt.tight_layout()
plt.savefig(sys.argv[1].replace('.txt', '') + '.png')
fig.show()
| gpl-3.0 |
fraser-lab/EMRinger | Figures/S6/S6B.py | 1 | 1437 | import matplotlib.pyplot as plt
from libtbx import easy_pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-a", dest="folder_a", help='Folder name for unrefined')
parser.add_argument("-b", dest="folder_b", help='Folder name for refined (Transmembrane)')
parser.add_argument("-c", dest="folder_c", help='Folder name for refined')
args = parser.parse_args()
y_a = easy_pickle.load('%s/emringer_scores.pkl' % args.folder_a)
y_b = easy_pickle.load('%s/emringer_scores.pkl' % args.folder_b)
y_c = easy_pickle.load('%s/emringer_scores.pkl' % args.folder_c)
x_a = easy_pickle.load('%s/thresholds.pkl' % args.folder_a)
x_b = easy_pickle.load('%s/thresholds.pkl' % args.folder_b)
x_c = easy_pickle.load('%s/thresholds.pkl' % args.folder_c)
# for i in range(len(y_a)):
# if y_b[i] > 0:
# x_a=x_a[i:]
# x_b=x_b[i:]
# y_a=y_a[i:]
# y_b=y_b[i:]
# break
fig, ax = plt.subplots(figsize=(6,4.5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_ylim(-1,4)
ax.set_xlabel('Map Value Threshold', labelpad=10)
ax.set_ylabel('EMRinger Score', labelpad=10)
ax.axhspan(-0.015,0.015,color='0.1',alpha=0.3, linewidth=0)
ax.plot(x_a,y_a,label="Unrefined", linewidth=3.0)
ax.plot(x_b,y_b,label="Unrefined (Transmembrane)", linewidth=3.0)
ax.plot(x_c,y_c,label="Refined (Transmembrane)", linewidth=3.0)
plt.legend(loc='upper right', fontsize="x-small")
fig.savefig('3B.png') | bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/scipy/spatial/tests/test__plotutils.py | 1 | 1511 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| gpl-3.0 |
keras-team/autokeras | examples/new_pop.py | 1 | 1824 | """shell
pip install autokeras
"""
import pandas as pd
import autokeras as ak
"""
## Social Media Articles Example
Regression tasks estimate a numeric variable, such as the price of a house
or a person's age.
This example estimates the view counts for an article on social media platforms,
trained on a
[News Popularity](
https://archive.ics.uci.edu/ml/datasets/
News+Popularity+in+Multiple+Social+Media+Platforms)
dataset collected from 2015-2016.
First, prepare your text data in a `numpy.ndarray` or `tensorflow.Dataset`
format.
"""
# converting from other formats (such as pandas) to numpy
df = pd.read_csv("./News_Final.csv")
text_inputs = df.Title.to_numpy(dtype="str")
media_success_outputs = df.Facebook.to_numpy(dtype="int")
"""
Next, initialize and train the [TextRegressor](/text_regressor).
"""
# Initialize the text regressor
reg = ak.TextRegressor(max_trials=15) # AutoKeras tries 15 different models.
# Find the best model for the given training data
reg.fit(text_inputs, media_success_outputs)
# Predict with the chosen model:
predict_y = reg.predict(text_inputs)
"""
If your text source has a larger vocabulary (number of distinct words), you may
need to create a custom pipeline in AutoKeras to increase the `max_tokens`
parameter.
"""
text_input = (df.Title + " " + df.Headline).to_numpy(dtype="str")
# text input and tokenization
input_node = ak.TextInput()
output_node = ak.TextToIntSequence(max_tokens=20000)(input_node)
# regression output
output_node = ak.RegressionHead()(output_node)
# initialize AutoKeras and find the best model
reg = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=15)
reg.fit(text_input, media_success_outputs)
"""
Measure the accuracy of the regressor on an independent test set:
"""
print(reg.evaluate(text_input, media_success_outputs))
| apache-2.0 |
dynaryu/rmtk | tests/vulnerability/tests_TO_BE_CHANGED/NSP/fragility_process/test_fragility_process.py | 4 | 2251 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 23 11:24:59 2015
@author: chiaracasotto
"""
# Clear existing variables
def clearall():
all = [var for var in globals() if var[0] != "_"]
for var in all:
del globals()[var]
clearall()
# Import functions
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
from rmtk.vulnerability.NSP.fragility_process import fragility_process
from rmtk.vulnerability.common.conversions import from_mean_to_median
import pandas as pd
pi = 3.141592653589793
plt.close("all")
cd = os.getcwd()
# <codecell>
cd = os.getcwd()
noBlg = 5;
Gamma = [1, 1, 1, 1, 1];
T = [1.0037, 0.77964, 0.74098, 0.63234, 0.48996];
Tav = 0.696;
w = [0.1, 0.2, 0.3, 0.3, 0.1];
Sa_ratios = [ 1.6391934, 1.22582094, 1.11783742, 0.92913515, 0.72870817];
SPO = [[0.099, 0.206, 0.25, 0.25, 91.431, 91.431, 84.659],
[0.061, 0.147, 0.29, 0.25, 63.103, 63.103, 54.743],
[0.067, 0.130, 0.20, 0.30, 54.735, 54.735, 45.919],
[0.065, 0.184, 0.27, 0.27, 141.68, 141.68, 120.290],
[0.082, 0.206, 0.3, 0.3, 111.26, 111.26, 108.06]];
Tc = 0.5;
Td = 1.8;
dcroof = [[0.046144456, 0.107312842, 0.212491246, 0.473466774, 0.724976013, 0.940278828, 0.940278828]]*noBlg
EDPlim = [[0.002, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08]]*noBlg
with open(cd+'/inputs/EDPvec-RDvec.csv', 'rb') as f:
reader = csv.reader(f)
newlist = [row for row in reader]
EDPvec = [np.array([float(ele[0]) for ele in newlist])]*noBlg
RDvec = [np.array([float(ele[1]) for ele in newlist])]*noBlg
bUthd = [np.repeat(0.,len(dcroof[0]))]*noBlg
# <codecell>
an_type = 2
in_type = 0
vuln = 0
g = 9.81
iml = np.linspace(0.1,2,50)
plotflag = [1, 1, 1, 1]
linew = 2
fontsize = 10
units = ['[m]', '[kN]', '[g]']
N = 10
MC = 50
Tc = 0.5
Td = 1.8
st = (1./(2.*MC))
en = (1.-(1./(2.*MC)))
xp = np.linspace(st,en,MC)
plot_feature = [plotflag, linew, fontsize, units, iml]
# <codecell>
[log_meanSa, log_stSa] = fragility_process(an_type, T, Gamma, w, EDPlim, dcroof, EDPvec, RDvec, SPO, bUthd, noBlg, g, MC, Sa_ratios, plot_feature, N, Tc, Td)
# <codecell>
data = pd.DataFrame({'1.Sa50':np.exp(log_meanSa),'2.stdev':log_stSa},index = np.arange(len(log_meanSa)))
data.to_csv('results/fragility_process.csv',header=True,index=True)
| agpl-3.0 |
pablocscode/TFG-CAEBAT | representacion1.5.py | 1 | 4170 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 18:44:45 2017
@author: Pablo
Objectives:
-Read and plot the profiles.out and halfcells.out files
-The reader must be able to parse the files regardless of their length
Guide:
-Both files must be in the same folder as this script
"""
import numpy as np
import matplotlib.pyplot as plt
#Read all the lines of the file
archivo = open('profiles.out','r')
lineas = archivo.readlines()
archivo.close()
#Compute the number of rows in the file for each time step
i = 4 #Data starts at line 4
num_distancias = 0
#The counter is increased for every non-blank line of the first block of
#time data
while lineas[i] != ' \n':
num_distancias += 1
i += 1
#Compute the number of time steps in the file
datos_halfcells = open('halfcells.out','r')
lineas_halfcells = datos_halfcells.readlines()
datos_halfcells.close()
num_tiempos = len(lineas_halfcells)-1 #the first line has no data
#Declare the arrays that will hold the column values
distancia = np.zeros((num_tiempos,num_distancias)) #Each column has 101 rows
C_Elec = np.zeros((num_tiempos,num_distancias))
C_Sol_Surf = np.zeros((num_tiempos,num_distancias))
Liq_Pot = np.zeros((num_tiempos,num_distancias))
Solid_Pot = np.zeros((num_tiempos,num_distancias))
J_main = np.zeros((num_tiempos,num_distancias))
tiempo = np.zeros(num_tiempos)
V_neg = np.zeros(num_tiempos)
V_pos = np.zeros(num_tiempos)
Heat_gen = np.zeros(num_tiempos)
#Data from profiles.out
#Initialise the counters before starting the for loop
fila =0
columna = 0
#Each line (row) holds the data for one specific time
for j in range(4,(num_distancias+6)*num_tiempos,num_distancias+6):
    for i in range(j,j+num_distancias): #Data starts at line 4
        #Each element of "lineas" is a whole line that we turn into a vector
linea = lineas[i].split(',')
        #Assign to each variable its value from the line just read
distancia[fila,columna] = float(linea[0])
C_Elec[fila,columna] = float(linea[1])
C_Sol_Surf[fila,columna] = float(linea[2])
Liq_Pot[fila,columna] = float(linea[3])
Solid_Pot[fila,columna] = float(linea[4])
J_main[fila,columna] = float(linea[5])
columna = columna +1
    #Assign the time of each plot
linea = lineas[j-1].split()
tiempo[fila] = float(linea[2])
    #At the end of the for loop move to the next row and reset the column counter
fila = fila+1
columna = 0
#Data from halfcells.out
for i in range(1,num_tiempos+1):
linea = lineas_halfcells[i].split()
V_neg[i-1] = linea[1]
V_pos[i-1] = linea[2]
Heat_gen[i-1] = linea[5]
#Plot the results
def plot(numero):
plt.figure(1)
plt.plot(distancia[numero],C_Elec[numero],'o')
plt.plot(distancia[0],C_Elec[0],'o')
plt.ylabel('Concentración Electrolito')
plt.title(tiempo[numero])
plt.xlabel('Distancia')
plt.figure(2)
plt.plot(distancia[numero],C_Sol_Surf[numero],'o')
plt.plot(distancia[0],C_Sol_Surf[0],'o')
plt.ylabel('Concentración Sólido')
plt.xlabel('Distancia')
plt.title(tiempo[numero])
plt.figure(3)
plt.plot(distancia[numero],Liq_Pot[numero],'o')
plt.plot(distancia[0],Liq_Pot[0],'o')
plt.ylabel('Potencial en el líquido')
plt.xlabel('Distancia')
plt.title(tiempo[numero])
plt.figure(4)
plt.plot(distancia[numero],Solid_Pot[numero],'o')
plt.plot(distancia[0],Solid_Pot[0],'o')
plt.ylabel('Potencial en el sólido')
plt.xlabel('Distancia')
plt.title(('Tiempo =', tiempo[numero],' min'))
plt.figure(5)
plt.plot(tiempo,V_pos-V_neg)
plt.ylabel('Voltaje celda (V)')
plt.xlabel('Tiempo (s)')
"""
plt.figure(6)
plt.plot(distancia[numero],J_main[numero],'o')
plt.ylabel('J main')
plt.xlabel('Distancia')
"""
#Plot the results for the last time step
plot(num_tiempos-1)
| gpl-3.0 |
grigorisg9gr/grigorisg9gr.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
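# A purely illustrative (hypothetical) example of one talks.tsv row, with the
# columns separated by tabs:
#   title: "Intro to My Research", type: "Talk", url_slug: "intro-research",
#   venue: "Some Conference", date: 2014-02-01, location: "Berlin, Germany",
#   talk_url: (blank), description: "A short overview talk."
# Any column other than title, url_slug and date may be left blank.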
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
zorroblue/scikit-learn | sklearn/manifold/t_sne.py | 10 | 35283 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
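# Illustrative sketch (a hypothetical helper, not part of the estimator and
# never called by it): how the condensed distance layout used above relates to
# the usual square distance matrix.
def _condensed_layout_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)
    d = pdist(X, "sqeuclidean")   # shape (4 * 3 / 2,) = (6,): upper triangle only
    D = squareform(d)             # shape (4, 4): symmetric, zero diagonal
    return d, D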
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
    # pdist always returns double precision distances. Thus we need to
    # allocate the gradient with the same dtype as the parameters.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
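# For reference: with degrees_of_freedom = 1 (e.g. a 2D embedding, since the
# caller sets degrees_of_freedom = max(n_components - 1, 1)), the gradient
# computed above reduces to the expression in van der Maaten & Hinton (2008):
#     dC/dy_i = 4 * sum_j (p_ij - q_ij) * (y_i - y_j) / (1 + ||y_i - y_j||^2)
# the constant c = 2 * (degrees_of_freedom + 1) / degrees_of_freedom being 4.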
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
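# Hedged usage sketch (a hypothetical helper, never called in this module):
# _gradient_descent only needs an objective returning (error, gradient) for a
# flat parameter vector, so the momentum/adaptive-gain update can be exercised
# on a toy quadratic. The values chosen below are illustrative only.
def _gradient_descent_demo():
    target = np.array([1.0, -2.0, 3.0])

    def quadratic(p):
        diff = p - target
        return np.dot(diff, diff), 2.0 * diff

    p, error, it = _gradient_descent(quadratic, np.zeros(3), it=0, n_iter=100,
                                     learning_rate=0.1, momentum=0.5,
                                     verbose=0)
    return p, error, it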
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = NearestNeighbors(n_neighbors).fit(X_embedded).kneighbors(
return_distance=False)
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
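# Hedged usage sketch (a hypothetical helper, never called in this module):
# trustworthiness only compares neighbourhoods before and after embedding, so
# a random linear projection of random data already yields a score in [0, 1].
def _trustworthiness_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = np.dot(X, rng.randn(10, 2))  # any (n_samples, n_components)
    return trustworthiness(X, X_embedded, n_neighbors=5)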
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that this
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
if self.method == 'barnes_hut':
X = check_array(X, ensure_min_samples=2,
dtype=[np.float32, np.float64])
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "than or equal to one")
else:
# Cpmpute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
knn = NearestNeighbors(algorithm='auto', n_neighbors=k,
metric=self.metric)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
distances_nn, neighbors_nn = knn.kneighbors(
None, n_neighbors=k)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
                # knn returns the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # method was derived using the squared euclidean metric in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence between the
        # Gaussians P and the Student's t-distributions Q. The optimization
        # algorithm that we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iterations with lower momentum
        # but a higher effective learning rate controlled via the early
        # exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
y : Ignored
"""
self.fit_transform(X)
return self
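# Minimal usage sketch (illustrative only; it assumes the public
# sklearn.manifold.TSNE API that this module implements, and the helper name
# below is just an example): embed a small random dataset into two dimensions
# and inspect the final KL divergence.
def _tsne_usage_example():
    import numpy as np
    from sklearn.manifold import TSNE
    X = np.random.RandomState(0).rand(100, 20)
    tsne = TSNE(n_components=2, perplexity=30.0, init='pca', random_state=0)
    X_embedded = tsne.fit_transform(X)        # shape (100, 2)
    return X_embedded.shape, tsne.kl_divergence_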
| bsd-3-clause |
jm-begon/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
deepesch/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
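# Minimal usage sketch (illustrative only; ``clf`` is any estimator exposing a
# partial_fit API and the helper name is just an example): the function above
# is typically called at the top of a partial_fit implementation to validate
# ``classes`` and to set ``classes_`` on the first call.
def _partial_fit_usage_example(clf, X, y, classes=None):
    if _check_partial_fit_first_call(clf, classes):
        # first call: clf.classes_ has just been set, allocate per-class state
        pass
    # ... the incremental update of clf with X, y would go here ...
    return clf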
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
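# Minimal usage sketch (illustrative only; the helper name and data are just
# an example): per-output class priors for a small dense multioutput target.
def _class_distribution_usage_example():
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 3]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes     -> [array([1, 2]), array([0, 3])]
    # n_classes   -> [2, 2]
    # class_prior -> approximately [array([0.667, 0.333]), array([0.667, 0.333])]
    return classes, n_classes, class_prior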
| bsd-3-clause |
UASLab/ImageAnalysis | video/5b-cam-mount-from-gyro1.py | 1 | 12642 | #!/usr/bin/env python3
# use feature-based motion (affine) roll, pitch, yaw rates. Compare
# rates in camera space vs. imu space and try to find an optimal
# transform to minimize the difference between them.
import argparse
import csv
import math
from matplotlib import pyplot as plt
import numpy as np
import os
import pandas as pd
from scipy import interpolate # straight up linear interpolation, nothing fancy
import scipy.signal as signal
import sys
sys.path.append('../scripts')
from lib import transformations
from aurauas_flightdata import flight_loader, flight_interp
import camera
import correlate
from feat_data import FeatureData
from horiz_data import HorizonData
parser = argparse.ArgumentParser(description='correlate movie data to flight data.')
parser.add_argument('--flight', required=True, help='load specified aura flight log')
parser.add_argument('--video', required=True, help='original video')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='forward',
help='approximate camera mounting orientation')
parser.add_argument('--resample-hz', type=float, default=60.0,
help='resample rate (hz)')
parser.add_argument('--time-shift', type=float,
help='skip autocorrelation and use this offset time')
parser.add_argument('--plot', action='store_true',
help='Plot stuff at the end of the run')
args = parser.parse_args()
smooth_cutoff_hz = 10
# pathname work
abspath = os.path.abspath(args.video)
filename, ext = os.path.splitext(abspath)
dirname = os.path.dirname(args.video)
video_rates = filename + "_rates.csv"
video_horiz = filename + "_horiz.csv"
ekf_error = filename + "_error.csv"
local_config = dirname + "/camera.json"
# load the camera config (we will modify the mounting offset later)
camera = camera.VirtualCamera()
camera.load(None, local_config)
cam_yaw, cam_pitch, cam_roll = camera.get_ypr()
K = camera.get_K()
dist = camera.get_dist()
print('Camera:', camera.get_name())
# load the flight data
flight_data, flight_format = flight_loader.load(args.flight)
print("imu records:", len(flight_data['imu']))
print("gps records:", len(flight_data['gps']))
if 'air' in flight_data:
print("airdata records:", len(flight_data['air']))
print("filter records:", len(flight_data['filter']))
if 'pilot' in flight_data:
print("pilot records:", len(flight_data['pilot']))
if 'act' in flight_data:
print("act records:", len(flight_data['act']))
if len(flight_data['imu']) == 0 and len(flight_data['filter']) == 0:
print("not enough data loaded to continue.")
quit()
interp = flight_interp.InterpolationGroup(flight_data)
iter = flight_interp.IterateGroup(flight_data)
# for convenience
hz = args.resample_hz
r2d = 180.0 / math.pi
d2r = math.pi / 180.0
# load camera rotation rate data (derived from feature matching video
# frames)
feat_data = FeatureData()
feat_data.load(video_rates)
feat_data.smooth(smooth_cutoff_hz)
feat_data.make_interp()
if args.plot:
feat_data.plot()
feat_interp = feat_data.resample(args.resample_hz)
plt.figure()
# plt.plot(data[:,0], data[:,1], label="video roll")
# plt.plot(data[:,0], data[:,3], label="ekf roll")
# plt.legend()
# plt.show()
# smooth imu gyro data
# prep to smooth flight data (noisy data can create tiny local minima
# that the optimizer can get stuck within)
imu = pd.DataFrame(flight_data['imu'])
imu.set_index('time', inplace=True, drop=False)
plt.plot(imu['p'], label='orig')
imu_min = imu['time'].iat[0]
imu_max = imu['time'].iat[-1]
imu_count = len(imu)
imu_fs = int(round((imu_count / (imu_max - imu_min))))
print("imu fs:", imu_fs)
b, a = signal.butter(2, smooth_cutoff_hz, fs=imu_fs)
imu['p'] = signal.filtfilt(b, a, imu['p'])
plt.plot(imu['p'], label='smooth')
imu['q'] = signal.filtfilt(b, a, imu['q'])
imu['r'] = signal.filtfilt(b, a, imu['r'])
plt.plot(feat_data.data['p (rad/sec)'], label='video (smooth)')
plt.legend()
#plt.show()
# resample (now smoothed) flight data
print("flight range = %.3f - %.3f (%.3f)" % (imu_min, imu_max,
imu_max-imu_min))
flight_interp = []
flight_len = imu_max - imu_min
p_interp = interpolate.interp1d(imu['time'], imu['p'], bounds_error=False, fill_value=0.0)
q_interp = interpolate.interp1d(imu['time'], imu['q'], bounds_error=False, fill_value=0.0)
r_interp = interpolate.interp1d(imu['time'], imu['r'], bounds_error=False, fill_value=0.0)
alt_interp = interp.group['filter'].interp['alt']
for x in np.linspace(imu_min, imu_max, int(round(flight_len*hz))):
flight_interp.append( [x, p_interp(x), q_interp(x), r_interp(x) ] )
print("flight len:", len(flight_interp))
# find the time correlation of video vs flight data
time_shift = \
correlate.sync_gyros(flight_interp, feat_interp, feat_data.span_sec,
hz=hz, cam_mount=args.cam_mount,
force_time_shift=args.time_shift, plot=args.plot)
# optimizer stuffs
from scipy.optimize import least_squares
# presample datas to save work in the error function
tmin = np.amax( [feat_data.tmin + time_shift, imu_min ] )
tmax = np.amin( [feat_data.tmax + time_shift, imu_max ] )
tlen = tmax - tmin
print("overlap range (flight sec):", tmin, " - ", tmax)
# Scan altitude range so we can match the portion of the flight that
# is up and away. This means the EKF will have had a better chance to
# converge, and the horizon detection should be getting a clear view.
min_alt = None
max_alt = None
for x in np.linspace(tmin, tmax, int(round(tlen*hz))):
alt = alt_interp(x)
if min_alt is None or alt < min_alt:
min_alt = alt
if max_alt is None or alt > max_alt:
max_alt = alt
print("altitude range: %.1f - %.1f (m)" % (min_alt, max_alt))
if max_alt - min_alt > 30:
alt_threshold = min_alt + (max_alt - min_alt) * 0.5
else:
alt_threshold = min_alt
print("Altitude threshold: %.1f (m)" % alt_threshold)
data = []
for x in np.linspace(tmin, tmax, int(round(tlen*hz))):
# feature-based
vp, vq, vr = feat_data.get_vals(x - time_shift)
# flight data
fp = p_interp(x)
fq = q_interp(x)
fr = r_interp(x)
alt = alt_interp(x)
if alt >= alt_threshold:
data.append( [x, vp, vq, vr, fp, fq, fr] )
print("Data points passing altitude threshold:", len(data))
# y, p, r, imu_gyro_biases (p, q, r)
initial = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
print("starting est:", initial)
# data = np.array(data)
# plt.figure()
# plt.plot(data[:,0], data[:,1], label="video roll")
# plt.plot(data[:,0], data[:,3], label="ekf roll")
# plt.legend()
# plt.show()
def errorFunc(xk):
print(" Trying:", xk)
# order is yaw, pitch, roll
R = transformations.euler_matrix(xk[0], xk[1], xk[2], 'rzyx')[:3,:3]
#print("R:\n", R)
# compute error function using global data structures
result = []
for r in data:
cam_gyro = r[1:4]
imu_gyro = r[4:7] + np.array(xk[3:6])
#print(r[4:7], imu_gyro)
#cam_gyro[1] = 0
#imu_gyro[1] = 0
#cam_gyro[2] = 0
#imu_gyro[2] = 0
#print("cam_gyro:", cam_gyro, "imu_gyro:", imu_gyro)
proj_gyro = R @ cam_gyro
#print("proj_gyro:", proj_gyro)
diff = imu_gyro - proj_gyro
#print("diff:", diff)
result.append( np.linalg.norm(diff) )
return np.array(result)
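# Minimal synthetic sanity check (illustrative only; this helper is an addition
# and is not called anywhere): errorFunc models imu_gyro + bias ~= R @ cam_gyro,
# so fake camera rates rotated through a known mount rotation with a known bias
# subtracted give a zero residual at the true parameters.
def _mount_fit_sanity_check():
    yaw = 0.1  # assumed mount offset: pure yaw (rad) about the camera z axis
    R_true = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                       [np.sin(yaw),  np.cos(yaw), 0.0],
                       [0.0,          0.0,         1.0]])
    true_bias = np.array([0.01, -0.02, 0.005])
    rng = np.random.RandomState(0)
    cam_rates = rng.normal(size=(100, 3))
    # synthesize imu rates so that imu + bias == R_true @ cam (errorFunc's model)
    imu_rates = (R_true @ cam_rates.T).T - true_bias
    resid = (imu_rates + true_bias) - (R_true @ cam_rates.T).T
    return np.linalg.norm(resid)  # ~0.0 at the true mount/bias parameters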
if True:
print("Optimizing...")
res = least_squares(errorFunc, initial, verbose=2)
#res = least_squares(errorFunc, initial, diff_step=0.0001, verbose=2)
print(res)
print(res['x'] * r2d)
print("Camera mount offset:")
print("Yaw: %.2f" % (res['x'][0]*r2d))
print("Pitch: %.2f" % (res['x'][1]*r2d))
print("Roll: %.2f" % (res['x'][2]*r2d))
initial = res['x']
def myopt(func, xk, spread):
print("Hunting for best result...")
done = False
estimate = list(xk)
while not done:
for n in range(len(estimate)):
xdata = []
ydata = []
center = estimate[n]
for x in np.linspace(center-spread, center+spread, num=11):
estimate[n] = x
result = func(estimate)
avg = np.mean(result)
std = np.std(result)
print("angle (deg) %.2f:" % (x*r2d),
"avg: %.6f" % np.mean(result),
"std: %.4f" % np.std(result))
xdata.append(x)
ydata.append(avg)
fit = np.polynomial.polynomial.polyfit( np.array(xdata), np.array(ydata), 2 )
print("poly fit:", fit)
poly = np.polynomial.polynomial.Polynomial(fit)
deriv = np.polynomial.polynomial.polyder(fit)
roots = np.polynomial.polynomial.polyroots(deriv)
print("roots:", roots)
estimate[n] = roots[0]
if args.plot:
plt.figure()
x = np.linspace(center-spread, center+spread, num=1000)
plt.plot(x, poly(x), 'r-')
plt.plot(xdata, ydata, 'b*')
plt.show()
spread = spread / 4
if spread < 0.001: # rad
done = True
print("Minimal error for index n at angle %.2f (deg)\n" % (estimate[n] * r2d))
return estimate
if False:
print("Optimizing...")
spread = 30*d2r
est = list(initial)
result = myopt(errorFunc, est, spread)
print("Best result:", np.array(result)*r2d)
if False:
# load horizon log data (derived from video)
horiz_data = HorizonData()
horiz_data.load(video_horiz)
horiz_data.smooth(smooth_cutoff_hz)
horiz_data.make_interp()
if args.plot:
horiz_data.plot()
horiz_interp = horiz_data.resample(args.resample_hz)
# restructure ekf data
ekf = pd.DataFrame(flight_data['filter'])
ekf.set_index('time', inplace=True, drop=False)
phi_interp = interpolate.interp1d(ekf['time'], ekf['phi'], bounds_error=False, fill_value=0.0)
the_interp = interpolate.interp1d(ekf['time'], ekf['the'], bounds_error=False, fill_value=0.0)
psix_interp = interpolate.interp1d(ekf['time'], ekf['psix'], bounds_error=False, fill_value=0.0)
psiy_interp = interpolate.interp1d(ekf['time'], ekf['psiy'], bounds_error=False, fill_value=0.0)
# overwrite 'data' array with new stuff
data = []
roll_sum = 0
pitch_sum = 0
for x in np.linspace(tmin, tmax, int(round(tlen*hz))):
# horizon
hphi, hthe, hp, hr = horiz_data.get_vals(x - time_shift)
# flight data
fphi = phi_interp(x)
fthe = the_interp(x)
psix = psix_interp(x)
psiy = psiy_interp(x)
fpsi = math.atan2(psiy, psix)
alt = alt_interp(x)
if alt >= alt_threshold:
data.append( [x, hphi, hthe, fpsi, fthe, fphi] )
roll_sum += hphi - fphi
pitch_sum += hthe - fthe
def horiz_errorFunc(xk):
print(" Trying:", xk)
camera.set_ypr(xk[0]*r2d, xk[1]*r2d, xk[2]*r2d) # cam mount offset
# compute error function using global data structures
        horiz_ned = [0, 0, 0] # any value works here (as long as it's consistent)
        result = []
        for r in data:
            camera.update_PROJ(horiz_ned, r[3], r[4], r[5]) # aircraft body attitude
#print("video:", r[1]*r2d, r[2]*r2d)
roll, pitch = camera.find_horizon()
#result.append( r[1] - (r[5] + xk[2]) )
#result.append( r[2] - (r[4] + xk[1]) )
if not roll is None:
result.append( r[1] - roll )
result.append( r[2] - pitch )
return np.array(result)
print("Plotting final result...")
result = horiz_errorFunc(np.array(res['x']))
rollerr = result[::2]
pitcherr = result[1::2]
# write to file
csvfile = open(ekf_error, 'w')
    fieldnames = [ 'video time',
                   'ekf roll error (rad)', 'ekf pitch error (rad)' ]
csv_writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
csv_writer.writeheader()
for i in range(len(data)):
row = { 'video time': "%.4f" % data[i][0],
'ekf roll error (rad)': "%.3f" % rollerr[i],
'ekf pitch error (rad)': "%.3f" % pitcherr[i] }
csv_writer.writerow(row)
csvfile.close()
print(len(result), len(data), len(data[::2]), len(rollerr), len(pitcherr))
data = np.array(data)
plt.figure()
plt.plot(data[:,0], rollerr*r2d, label="roll error")
plt.plot(data[:,0], pitcherr*r2d, label="pitch error")
plt.ylabel("Angle error (deg)")
plt.xlabel("Flight time (sec)")
plt.grid()
plt.legend()
plt.show()
| mit |
christobal54/aei-grad-school | bin/nirv-test.py | 1 | 12970 | #!/usr/bin/python
#####
# assesses drivers of variance of nir-v using PROSAIL canopy modeling
#####
import numpy as np
import pyprosail
import random
import scipy.optimize
import math
import spectral as spectral
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn import metrics
from sklearn.model_selection import train_test_split
#####
# set up output files and processing parameters
#####
# set number of random veg, bundles to simulate
n_bundles = 2000
# set the number of output bands (default prosail is 2101)
nb = 2101
# set a color palette
palette = ["#CC79A7", "#0072B2", "#D55E00", "#009E73", "#56B4E9", "#F0E442", "#E69F00", "#000000"]
# set output files to store the results
output_csv = 'prosail_spectra.csv'
output_sli = 'prosail_spectra.sli'
output_hdr = 'prosail_spectra.hdr'
output_spec = []
#####
# set up the leaf and canopy modeling parameters
#####
N = []
chloro = []
caroten = []
brown = []
EWT = []
LMA = []
soil_reflectance = []
LAI = []
hot_spot = []
LAD_inclination = []
LAD_bimodality = []
s_az = []
s_za = []
v_az = []
v_za = []
nir_v = []
brightness = []
# set the wavelengths to use in calculating nir_v
red_wl = 0.680
nir_wl = 0.800
# set a dummy run of prosail to find the index for each wavelength
spec = pyprosail.run(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
red_band = np.where(spec[:,0] == red_wl)[0][0]
nir_band = np.where(spec[:,0] == nir_wl)[0][0]
# create a function to calculate nir_v
def calc_nir_v(red, nir):
return nir * (nir - red) / (nir + red)
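# quick sanity check of the formula (illustrative numbers): with red = 0.05
# and nir = 0.45, NDVI = (0.45 - 0.05) / (0.45 + 0.05) = 0.8, so
# NIRv = 0.45 * 0.8 = 0.36
assert abs(calc_nir_v(0.05, 0.45) - 0.36) < 1e-9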
# and some functions to optimize linear and logistic fits for plotting later
def func_sigmoid(x, x0, k, a, c):
return a / (1 + np.exp(-k * (x - x0))) + c
def func_linear(x, m, b):
return m * x + b
def func_fit(x, y, function):
opt, cov = scipy.optimize.curve_fit(function, x, y)
y_fit = function(np.array(x), *opt)
rsq = metrics.r2_score(y, y_fit)
rms = np.sqrt(metrics.mean_squared_error(y, y_fit))
return [y_fit, rsq, rms]
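# quick illustrative example (not executed): for exactly linear data such as
# x = [0, 1, 2, 3], y = [1, 3, 5, 7] (i.e. y = 2x + 1), func_fit recovers
# m = 2 and b = 1, so the returned r-squared is 1.0 and the rmse is 0.0:
# func_fit([0, 1, 2, 3], [1, 3, 5, 7], func_linear) -> [fitted y values, 1.0, 0.0]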
def bn(spectrum):
return np.sqrt((spectrum ** 2).sum())
# find the wavelengths where water absorption does its thing
# and remove them when calculating brightness
water_bands = [[1.350, 1.460], [1.790, 1.960]]
# start with nir-swir1 transition
gt = np.where(spec[:,0] > water_bands[0][0])
lt = np.where(spec[:,0] < water_bands[0][1])
nd1 = np.intersect1d(gt[0], lt[0])
# then swir1-swir2 transition
gt = np.where(spec[:,0] > water_bands[1][0])
lt = np.where(spec[:,0] < water_bands[1][1])
nd2 = np.intersect1d(gt[0], lt[0])
# concatenate into a single list of indices to remove
water_inds = np.concatenate((nd1, nd2))
# loop through the bundles and generate random canopy parameters
for i in range(n_bundles):
# structural coefficient (arbitrary units)
# range 1.3 - 2.5 from Rivera et al. 2013 http://dx.doi.org/10.3390/rs5073280
N.append(random.uniform(1.3,2.5))
# total chlorophyll content (ug/cm^2)
# range ~ 5 - 75 from Rivera et al. 2013, but i'll set a little more conservative
chloro.append(random.gauss(35, 30))
while chloro[-1] < 10 or chloro[-1] > 60:
chloro[-1] = random.gauss(35, 30)
# total carotenoid content (ug/cm^2)
# kinda fudged this to be like 1/4 of total chl
caroten.append(random.gauss(8.75, 7.5))
while caroten[-1] < 2 or caroten[-1] > 15:
caroten[-1] = random.gauss(8.75, 7.5)
# brown pigment content (arbitrary units) - not gonna mess with this
brown.append(0)
# equivalent water thickness (cm)
# range 0.002 - 0.05 from Rivera et al. 2013
EWT.append(random.uniform(0.002, 0.05))
# leaf mass per area (g/cm^2)
# global range 0.0022 - 0.0365 (median 0.01)
# from Asner et al. 2011 http://dx.doi.org/10.1016/j.rse.2011.08.020
# gonna go a little more conservative
LMA.append(random.gauss(0.012, 0.005))
while LMA[-1] < 0.005 or LMA[-1] > 0.0250:
LMA[-1] = random.gauss(0.012, 0.005)
# soil reflectance metric (wet soil = 0, dry soil = 1)
soil_reflectance.append(random.uniform(0,1))
# leaf area index (unitless, cm^2 leaf area/cm^2 ground area)
# range 0.01 - 18.0 (5.5 mean) globally
# range 0.2 - 8.7 (3.6 mean) for crops
# range 0.6 - 2.8 (1.3 mean) for desert plants
# range 0.5 - 6.2 (2.6 mean) for boreal broadleaf forest
# range 0.5 - 8.5 (4.6 mean) for boreal needle forest
# range 0.8 - 11.6 (5.1 mean) for temperate broadleaf forest
# range 0.01 - 15.0 (5.5 mean) for temperate needle forest
# range 0.6 - 8.9 (4.8 mean) for tropical broadleaf forest
# range 0.3 - 5.0 (1.7 mean) for grasslands
# range 1.6 - 18.0 (8.7 mean) for plantations
# range 0.4 - 4.5 (2.1 mean) for shrublands
# range 0.2 - 5.3 (1.9 mean) for tundra
# range 2.5 - 8.4 (6.3 mean) for wetlands
# from Asner, Scurlock and Hicke 2003 http://dx.doi.org/10.1046/j.1466-822X.2003.00026.x
LAI.append(random.gauss(3,2))
while LAI[-1] < 0.5 or LAI[-1] > 15:
LAI[-1] = random.gauss(3,2)
# hot spot parameter (derived from brdf model)
# range 0.05-0.5 from Rivera et al. 2013
hot_spot.append(random.uniform(0.05, 0.5))
# leaf distribution function parameter.
# range LAD_inc -0.4 - 0.4, LAD_bim -0.1 - 0.2 for trees
# range LAD_inc -0.1 - 0.3, LAD_bim 0.3 - 0.5 for lianas
# range LAD_inc -0.8 - -0.2, LAD_bim -0.1 - 0.3 for Palms
# from Asner et al. 2011
LAD_inclination.append(random.uniform(-0.4, 0.4))
LAD_bimodality.append(random.uniform(-0.1, 0.2))
# viewing and solar angle parameters
# solar zenith ranges cludged from http://gis.stackexchange.com/questions/191692/maximum-solar-zenith-angle-for-landsat-8-images
# I couldn't find good data on the range of possible solar or viewing azimuth.
# I decided to set view parameters to 0 to assume nice, clean nadir viewing, and let the sun vary.
s_za.append(random.uniform(20, 70))
s_az.append(random.uniform(0,360))
v_az.append(0)
v_za.append(0)
#####
# set up the loop for each atmosphere/canopy model
#####
# first create the output array that will contain all the resulting spectra
output_array = np.zeros([nb, (n_bundles) + 1])
# loop through each veg / wood / soil bundle
for j in range(n_bundles):
# load prosail and run the canopy model
LIDF = (LAD_inclination[j], LAD_bimodality[j])
spectrum = pyprosail.run(N[j], chloro[j], caroten[j],
brown[j], EWT[j], LMA[j], soil_reflectance[j],
LAI[j], hot_spot[j], s_za[j], s_az[j],
v_za[j], v_az[j], LIDF)
# add the modeled spectrum to the output array
output_array[:, (j+1)] = spectrum[:,1]
# add a new name to label in the output spectral library
output_spec.append('veg bundle ' + str(j+1))
# calculate nirv for this spectrum
nir_v.append(calc_nir_v(spectrum[red_band, 1], spectrum[nir_band, 1]))
# calculate the brightness scalar for this spectrum
brightness.append(bn(np.delete(spectrum[:,1], water_inds)))
# now that the loop has finished we can export our results to a csv file
output_array[:, 0] = spectrum[:,0]
np.savetxt(output_csv, output_array.transpose(), delimiter=",", fmt = '%.3f')
# output a spectral library
with open(output_sli, 'w') as f:
output_array[:,1:].transpose().tofile(f)
# write the ENVI header file for the spectral library
metadata = {
'samples' : nb,
'lines' : n_bundles,
'bands' : 1,
'data type' : 5,
'header offset' : 0,
'interleave' : 'bsq',
'byte order' : 0,
'sensor type' : 'prosail',
'spectra names' : output_spec,
'wavelength units' : 'micrometers',
'wavelength' : output_array[:,0]
}
spectral.envi.write_envi_header(output_hdr, metadata, is_library=True)
# start running some analysis
y = nir_v
x = []
for j in range(n_bundles):
x.append([N[j], chloro[j], caroten[j], LMA[j], soil_reflectance[j], LAI[j], hot_spot[j],
LAD_inclination[j], LAD_bimodality[j], s_az[j], s_za[j], brightness[j]])
# split in to train/test splits to eval regression
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.3)
# train a few models
mod_simple = tree.DecisionTreeRegressor(max_depth = 2)
mod_medium = tree.DecisionTreeRegressor(max_depth = 5)
mod_large = tree.DecisionTreeRegressor()
mod_simple.fit(x_train, y_train)
mod_medium.fit(x_train, y_train)
mod_large.fit(x_train, y_train)
# run some predictions
y_test_simple = mod_simple.predict(x_test)
y_test_medium = mod_medium.predict(x_test)
y_test_large = mod_large.predict(x_test)
y_test_list = [y_test_simple, y_test_medium, y_test_large]
# print some outputs
mod = [mod_simple, mod_medium, mod_large]
names = ['simple', 'medium', 'large']
for i in range(len(mod)):
print("{mod} model".format(mod = names[i]))
print("r-squared : {:0.3f}".format(metrics.r2_score(y_test, y_test_list[i])))
print("explained var: {:0.3f}".format(metrics.explained_variance_score(y_test, y_test_list[i])))
print("rmse : {:0.3f}".format(np.sqrt(metrics.mean_squared_error(y_test, y_test_list[i]))))
print("mean abs err : {:0.3f}".format(metrics.mean_absolute_error(y_test, y_test_list[i])))
print("feature importance")
print("N : {:0.3f}".format(mod[i].feature_importances_[0]))
print("chlorophyll : {:0.3f}".format(mod[i].feature_importances_[1]))
print("carotenoids : {:0.3f}".format(mod[i].feature_importances_[2]))
print("LMA : {:0.3f}".format(mod[i].feature_importances_[3]))
print("soil_refl : {:0.3f}".format(mod[i].feature_importances_[4]))
print("LAI : {:0.3f}".format(mod[i].feature_importances_[5]))
print("hot spot : {:0.3f}".format(mod[i].feature_importances_[6]))
print("LAD_incl : {:0.3f}".format(mod[i].feature_importances_[7]))
print("LAD_biomod : {:0.3f}".format(mod[i].feature_importances_[8]))
print("solar azi : {:0.3f}".format(mod[i].feature_importances_[9]))
print("solar zen : {:0.3f}".format(mod[i].feature_importances_[10]))
print("brightness : {:0.3f}".format(mod[i].feature_importances_[11]))
print("-----")
print("")
# start plotting some outputs
plt.figure(1)
plt.suptitle("Canopy trait drivers of NIRv\nLAI {:0.1f} - {:0.1f}".format(min(LAI), max(LAI)))
# first, LAI and nir_v
plt.subplot(321)
lai_fit = func_fit(np.array(LAI), nir_v, func_linear)
plt.scatter(LAI, nir_v, c = palette[0], label = "rmse: {:0.3f}".format(lai_fit[2]))
plt.plot(LAI, lai_fit[0], c = 'black', label = "r-squared: {:0.3f}".format(lai_fit[1]))
plt.title("LAI")
plt.ylabel("NIRv")
plt.legend()
# then, LMA and nir_v
plt.subplot(322)
lma_fit = func_fit(LMA, nir_v, func_linear)
plt.scatter(LMA, nir_v, c = palette[1], label = "rmse: {:0.3f}".format(lma_fit[2]))
plt.plot(LMA, lma_fit[0], c = 'black', label = "r-squared: {:0.3f}".format(lma_fit[1]))
plt.title("LMA")
plt.legend()
# then leaf angle distribution
plt.subplot(323)
lad_fit = func_fit(LAD_inclination, nir_v, func_linear)
plt.scatter(LAD_inclination, nir_v, c = palette[2], label = "rmse: {:0.3f}".format(lad_fit[2]))
plt.plot(LAD_inclination, lad_fit[0], c = "black", label = "r-squared: {:0.3f}".format(lad_fit[1]))
plt.title("LAD")
plt.ylabel("NIRv")
plt.legend()
# then chlorophyll
plt.subplot(324)
chl_fit = func_fit(chloro, nir_v, func_linear)
plt.scatter(chloro, nir_v, c = palette[3], label = "rmse: {:0.3f}".format(chl_fit[2]))
plt.plot(chloro, chl_fit[0], c = "black", label = "r-squared: {:0.3f}".format(chl_fit[1]))
plt.title("CHL")
plt.legend()
# then brightness
plt.subplot(325)
brt_fit = func_fit(brightness, nir_v, func_linear)
plt.scatter(brightness, nir_v, c = palette[4], label = "rmse: {:0.3f}".format(brt_fit[2]))
plt.plot(brightness, brt_fit[0], c = "black", label = "r-squared: {:0.3f}".format(brt_fit[1]))
plt.title("Brightness")
plt.ylabel("NIRv")
plt.legend()
# save the plot, fool
plt.savefig("nirv-params.png")
plt.show()
plt.close()
# plot some canopy spectra
spec_min = output_array[:, np.where(np.array(nir_v) == min(nir_v))[0] + 1]
spec_max = output_array[:, np.where(np.array(nir_v) == max(nir_v))[0] + 1]
spec_mean = np.mean(np.array(output_array[:,1:]), axis = 1)
plt.plot(output_array[:,0], spec_min, c = palette[6], label = 'Min NIRv: {:0.3f}'.format(min(nir_v)))
plt.plot(output_array[:,0], spec_mean, c = 'black', label = 'Mean reflectance')
plt.plot(output_array[:,0], spec_max, c = palette[3], label = 'Max NIRv: {:0.3f}'.format(max(nir_v)))
plt.xlabel("Wavelength (um)")
plt.ylabel("Reflectance (%)")
plt.title("Simulated canopy reflectance")
plt.legend()
# then, prediction results from these parameters
plt.figure()
plt.scatter(y_test_large, y_test, c = palette[4], label = "r-squared: {:0.3f}".format(metrics.r2_score(y_test, y_test_large)))
plt.xlabel("predicted")
plt.ylabel("test data")
plt.title("Predicted NIR-v from PROSAIL parameters")
plt.legend()
plt.savefig("nirv-predicted.png")
plt.show()
plt.close() | mit |
mrtukkin/ifp | infer.py | 1 | 2350 | model_root = '../../model/'
data_root = '../../data/'
results_root = '../../results/'
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from PIL import Image
from scipy.misc import imsave, imread
import sys
import getopt
sys.path.insert(0, '../../lib/caffe/python')
import caffe
import os
import site
site.addsitedir('./oylmpic_layer')
from progress.bar import Bar
import shelve
import ifp_morris
def loadim(path):
im = Image.open(path)
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= np.array((103.939, 116.779, 123.68))
in_ = in_.transpose((2,0,1))
return in_
def inferfile(net, path_file, im_head):
print 'Reading ' + path_file
paths = open(path_file, 'r').read().splitlines()
if not os.path.exists(results_root + 'OlympicSports/fcn/'):
os.makedirs(results_root + 'OlympicSports/fcn/')
db = shelve.open(results_root + 'OlympicSports/fcn/activationsDB')
bar = Bar(path_file, max=len(paths))
for path in paths:
in_ = loadim(im_head + path + 'png')
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
net.forward()
out = net.blobs['deconv3'].data[0]
maxidx = np.argmax(np.sum(out, axis=(1,2)))
db[path] = maxidx
db.sync()
maxim = out[maxidx, ...]
resdir = results_root + 'OlympicSports/fcn/' + path[:-8]
if not os.path.exists(resdir):
os.makedirs(resdir)
imsave(resdir + path[-8:] + 'png', maxim)
bar.next()
db.close()
def main(argv):
sport = 'long_jump'
model = 'snap_iter_50000.caffemodel'
#---
weights = model_root + 'fcn/' + sport + '/' + model
netf = './fcn/' + sport + '/deploy.prototxt'
gpu = 0
caffe.set_device(gpu)
caffe.set_mode_gpu()
net = caffe.Net(netf, weights, caffe.TEST)
im_head = '/export/home/mfrank/data/OlympicSports/clips/'
im_head = '/export/home/mfrank/data/OlympicSports/patches/'
test_path_file = 'fcn/' + sport + '/test.txt'
train_path_file = 'fcn/' + sport + '/train.txt'
inferfile(net, train_path_file, im_head)
ifp_morris.apply_overlayfcn(train_path_file, factor=4)
inferfile(net, test_path_file, im_head)
ifp_morris.apply_overlayfcn(test_path_file, factor=4)
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 |
depet/scikit-learn | sklearn/kernel_approximation.py | 5 | 17029 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import (array2d, atleast2d_or_csr, check_random_state,
as_float_array)
from .utils.extmath import safe_sparse_dot
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = atleast2d_or_csr(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = atleast2d_or_csr(X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
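# Minimal usage sketch (illustrative only; the helper name and data below are
# just an example): expand features with RBFSampler and fit a linear model on
# the approximate kernel feature space.
def _rbf_sampler_usage_example():
    from sklearn.linear_model import SGDClassifier
    X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    y = [0, 0, 1, 1]
    rbf_feature = RBFSampler(gamma=1.0, n_components=100, random_state=0)
    X_features = rbf_feature.fit_transform(X)   # shape (4, 100)
    clf = SGDClassifier(random_state=0).fit(X_features, y)
    return clf.score(X_features, y)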
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = array2d(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = as_float_array(X, copy=True)
X = array2d(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
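# Minimal usage sketch (illustrative only; the helper name is just an example):
# the skewed chi-squared map only accepts non-negative inputs and returns
# n_components Fourier features per sample.
def _skewed_chi2_usage_example():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)                       # entries must be >= 0
    sampler = SkewedChi2Sampler(skewedness=1., n_components=50, random_state=0)
    X_features = sampler.fit_transform(X)
    return X_features.shape                   # (10, 50)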
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``sklearn.metrics.additive_chi2_kernel`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.additive_chi2_kernel : The exact additive chi squared
kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://eprints.pascal-network.org/archive/00006964/01/vedaldi10.pdf>`_
Vedaldi, A. and Zisserman, A., Computer Vision and Pattern Recognition 2010
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = atleast2d_or_csr(X)
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
            self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
                shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
X = atleast2d_or_csr(X)
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
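# Minimal usage sketch (illustrative only; the helper name and data are just an
# example): with sample_steps=2 each input column is expanded into
# 2 * 2 - 1 = 3 output columns, so a 3-feature input becomes a 9-feature output.
def _additive_chi2_usage_example():
    X = np.array([[0.2, 0.8, 0.0],
                  [0.5, 0.1, 0.4]])
    transformer = AdditiveChi2Sampler(sample_steps=2)
    X_transformed = transformer.fit_transform(X)
    return X_transformed.shape                # (2, 9)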
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
`components_` : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
`component_indices_` : array, shape (n_components)
Indices of ``components_`` in the training set.
`normalization_` : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metric.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **self._get_kernel_params())
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
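# Minimal usage sketch (illustrative only; the helper name and data are just an
# example): the inner product of the Nystroem features approximates the exact
# RBF kernel matrix.
def _nystroem_usage_example():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)
    feature_map = Nystroem(kernel="rbf", gamma=0.5, n_components=20,
                           random_state=0)
    X_transformed = feature_map.fit_transform(X)
    K_approx = np.dot(X_transformed, X_transformed.T)
    K_exact = pairwise_kernels(X, metric="rbf", gamma=0.5)
    return np.abs(K_approx - K_exact).mean()  # small for a good approximation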
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
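def _expected_combination_count_demo():
    # Hypothetical helper (not part of the original suite): illustrates the
    # combinatorial bound that check_sample_int_distribution waits for, e.g.
    # drawing 3 items without replacement from a population of 10 can produce
    # C(10, 3) == 120 distinct frozensets.
    assert combinations(10, 3, exact=True) == 120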
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # an array in classes and its entry in class_probabilites have
    # mismatched lengths
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
berkeley-stat159/project-zeta | code/tsa_s5.py | 3 | 10496 | from __future__ import print_function, division
import numpy as np
import numpy.linalg as npl
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
import os
import re
import json
import nibabel as nib
from utils import subject_class as sc
from utils import outlier
from utils import diagnostics as diagnos
from utils import get_object_neural as neural
from utils import stimuli
from utils import convolution as convol
from utils import linear_model as lm
from utils import maskfunc as msk
import copy
import statsmodels.api as sm
from operator import add
# important path:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
figure_path = os.path.join(base_path, "code", "images", "")
file_path = os.path.join(base_path, "code", "txt", "")
# help to make directory to save figure and txt
if not os.path.exists(figure_path):
os.makedirs(figure_path)
if not os.path.exists(file_path):
os.makedirs(file_path)
# separator:
separator = "-" * 80
# which subject to work on?
subid = "sub005"
# work on results from this subject:
########################################
print (separator)
print ("Project-Zeta: use times series to study ds105 dataset")
print (separator)
print ("Focus on %s for the analysis" % subid)
print (separator)
sub = sc.subject(subid)
# get image files of this subject:
sub_img = sub.run_img_result
# get data for those figures
print ("Get data from images...")
sub_data = {}
for key, img in sub_img.iteritems():
sub_data[key] = img.get_data()
print ("Complete!")
print (separator)
brain1 = sub_data["sub005_run001"]
x1 = copy.deepcopy(sub_data)
maskedDict, volDict = msk.generateMaskedBrain(x1)
s5r1Masked = maskedDict["sub005_run001"]
s5r1Vol = volDict["sub005_run001"]
brain2 = sub_data["sub005_run002"]
s5r2Masked = maskedDict["sub005_run002"]
s5r2Vol = volDict["sub005_run002"]
brain3 = sub_data["sub005_run003"]
s5r3Masked = maskedDict["sub005_run003"]
s5r3Vol = volDict["sub005_run003"]
brain4 = sub_data["sub005_run004"]
s5r4Masked = maskedDict["sub005_run004"]
s5r4Vol = volDict["sub005_run004"]
brain5 = sub_data["sub005_run005"]
s5r5Masked = maskedDict["sub005_run005"]
s5r5Vol = volDict["sub005_run005"]
brain6 = sub_data["sub005_run006"]
s5r6Masked = maskedDict["sub005_run006"]
s5r6Vol = volDict["sub005_run006"]
brain7 = sub_data["sub005_run007"]
s5r7Masked = maskedDict["sub005_run007"]
s5r7Vol = volDict["sub005_run007"]
brain8 = sub_data["sub005_run008"]
s5r8Masked = maskedDict["sub005_run008"]
s5r8Vol = volDict["sub005_run008"]
brain9 = sub_data["sub005_run009"]
s5r9Masked = maskedDict["sub005_run009"]
s5r9Vol = volDict["sub005_run009"]
brain10 = sub_data["sub005_run010"]
s5r10Masked = maskedDict["sub005_run010"]
s5r10Vol = volDict["sub005_run010"]
brain11 = sub_data["sub005_run011"]
s5r11Masked = maskedDict["sub005_run011"]
s5r11Vol = volDict["sub005_run011"]
# Focus on z = 33:38, y = 25 to 50, all of x
# brain = brain[:, 25:50, 32, :]
# s1r1Masked = s1r1Masked[:, 25:50, 32]
# brain = brain[s1r1Masked, :]
brains5r1 = brain1[:, 25:50, 33:38, :]
s5r1Masked = s5r1Masked[:, 25:50, 33:38]
brains5r1 = brains5r1[s5r1Masked, :]
brains5r2 = brain2[:, 25:50, 33:38, :]
s5r2Masked = s5r2Masked[:, 25:50, 33:38]
brains5r2 = brains5r2[s5r2Masked, :]
brains5r3 = brain3[:, 25:50, 33:38, :]
s5r3Masked = s5r3Masked[:, 25:50, 33:38]
brains5r3 = brains5r3[s5r3Masked, :]
brains5r4 = brain4[:, 25:50, 33:38, :]
s5r4Masked = s5r4Masked[:, 25:50, 33:38]
brains5r4 = brains5r4[s5r4Masked, :]
brains5r5 = brain5[:, 25:50, 33:38, :]
s5r5Masked = s5r5Masked[:, 25:50, 33:38]
brains5r5 = brains5r5[s5r5Masked, :]
brains5r6 = brain6[:, 25:50, 33:38, :]
s5r6Masked = s5r6Masked[:, 25:50, 33:38]
brains5r6 = brains5r6[s5r6Masked, :]
brains5r7 = brain7[:, 25:50, 33:38, :]
s5r7Masked = s5r7Masked[:, 25:50, 33:38]
brains5r7 = brains5r7[s5r7Masked, :]
brains5r8 = brain8[:, 25:50, 33:38, :]
s5r8Masked = s5r8Masked[:, 25:50, 33:38]
brains5r8 = brains5r8[s5r8Masked, :]
brains5r9 = brain9[:, 25:50, 33:38, :]
s5r9Masked = s5r9Masked[:, 25:50, 33:38]
brains5r9 = brains5r9[s5r9Masked, :]
brains5r10 = brain10[:, 25:50, 33:38, :]
s5r10Masked = s5r10Masked[:, 25:50, 33:38]
brains5r10 = brains5r10[s5r10Masked, :]
brains5r11 = brain11[:, 25:50, 33:38, :]
s5r11Masked = s5r11Masked[:, 25:50, 33:38]
brains5r11 = brains5r11[s5r11Masked, :]
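# For every run, average the masked voxels at each of the 121 volumes to get a
# single mean time course, then slice out the 10-volume block for each stimulus
# category.  The index ranges below encode the presentation order of the eight
# categories in that particular run (the order differs from run to run).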
arr1 = [0.0] * 121
for i in range(121):
arr1[i] = np.mean(brains5r1[:, i])
r1house = arr1[93:103]
r1scram = arr1[107:117]
r1cat = arr1[21:31]
r1shoe = arr1[35:45]
r1bottle = arr1[78:88]
r1scissor = arr1[64:74]
r1chair = arr1[50:60]
r1face = arr1[6:16]
arr2 = [0.0] * 121
for i in range(121):
arr2[i] = np.mean(brains5r2[:, i])
r2house = arr2[6:16]
r2scram = arr2[107:117]
r2cat = arr2[93:103]
r2shoe = arr2[78:88]
r2bottle = arr2[35:45]
r2scissor = arr2[21:31]
r2chair = arr2[64:74]
r2face = arr2[50:60]
arr3 = [0.0] * 121
for i in range(121):
arr3[i] = np.mean(brains5r3[:, i])
r3house = arr3[78:88]
r3scram = arr3[21:31]
r3cat = arr3[6:16]
r3shoe = arr3[64:74]
r3bottle = arr3[50:60]
r3scissor = arr3[107:117]
r3chair = arr3[35:45]
r3face = arr3[93:103]
arr4 = [0.0] * 121
for i in range(121):
arr4[i] = np.mean(brains5r4[:, i])
r4house = arr4[107:117]
r4scram = arr4[21:31]
r4cat = arr4[78:88]
r4shoe = arr4[50:60]
r4bottle = arr4[64:74]
r4scissor = arr4[35:45]
r4chair = arr4[93:103]
r4face = arr4[6:16]
arr5 = [0.0] * 121
for i in range(121):
arr5[i] = np.mean(brains5r5[:, i])
r5house = arr5[50:60]
r5scram = arr5[64:74]
r5cat = arr5[35:45]
r5shoe = arr5[78:88]
r5bottle = arr5[93:103]
r5scissor = arr5[107:117]
r5chair = arr5[21:31]
r5face = arr5[6:16]
arr6 = [0.0] * 121
for i in range(121):
arr6[i] = np.mean(brains5r6[:, i])
r6house = arr6[6:16]
r6scram = arr6[21:31]
r6cat = arr6[78:88]
r6shoe = arr6[50:60]
r6bottle = arr6[93:103]
r6scissor = arr6[107:117]
r6chair = arr6[64:74]
r6face = arr6[35:45]
arr7 = [0.0] * 121
for i in range(121):
arr7[i] = np.mean(brains5r7[:, i])
r7house = arr7[78:88]
r7scram = arr7[64:74]
r7cat = arr7[93:103]
r7shoe = arr7[50:60]
r7bottle = arr7[107:117]
r7scissor = arr7[35:45]
r7chair = arr7[21:31]
r7face = arr7[6:16]
arr8 = [0.0] * 121
for i in range(121):
arr8[i] = np.mean(brains5r8[:, i])
r8house = arr8[50:60]
r8scram = arr8[35:45]
r8cat = arr8[21:31]
r8shoe = arr8[93:103]
r8bottle = arr8[107:117]
r8scissor = arr8[64:74]
r8chair = arr8[78:88]
r8face = arr8[6:16]
arr9 = [0.0] * 121
for i in range(121):
arr9[i] = np.mean(brains5r9[:, i])
r9house = arr9[21:31]
r9scram = arr9[50:60]
r9cat = arr9[93:103]
r9shoe = arr9[78:88]
r9bottle = arr9[6:16]
r9scissor = arr9[107:117]
r9chair = arr9[35:45]
r9face = arr9[64:74]
arr10 = [0.0] * 121
for i in range(121):
arr10[i] = np.mean(brains5r10[:, i])
r10house = arr10[21:31]
r10scram = arr10[78:88]
r10cat = arr10[50:60]
r10shoe = arr10[6:16]
r10bottle = arr10[93:103]
r10scissor = arr10[107:117]
r10chair = arr10[35:45]
r10face = arr10[64:74]
arr11 = [0.0] * 121
for i in range(121):
arr11[i] = np.mean(brains5r11[:, i])
r11house = arr11[64:74]
r11scram = arr11[78:88]
r11cat = arr11[35:45]
r11shoe = arr11[50:60]
r11bottle = arr11[93:103]
r11scissor = arr11[6:16]
r11chair = arr11[107:117]
r11face = arr11[21:31]
evenHouse = (np.array(r2house) + np.array(r4house) +
np.array(r6house) + np.array(r8house) +
np.array(r10house)) / 5
oddHouse = (np.array(r1house) + np.array(r3house) +
np.array(r5house) + np.array(r7house) +
np.array(r9house) + np.array(r11house)) / 6
evenScram = (np.array(r2scram) + np.array(r4scram) +
np.array(r6scram) + np.array(r8scram) +
np.array(r10scram)) / 5
oddScram = (np.array(r1scram) + np.array(r3scram) +
np.array(r5scram) + np.array(r7scram) +
np.array(r9scram) + np.array(r11scram)) / 6
evenCat = (np.array(r2cat) + np.array(r4cat) +
np.array(r6cat) + np.array(r8cat) +
np.array(r10cat)) / 5
oddCat = (np.array(r1cat) + np.array(r3cat) +
np.array(r5cat) + np.array(r7cat) +
np.array(r9cat) + np.array(r11cat)) / 6
evenShoe = (np.array(r2shoe) + np.array(r4shoe) +
np.array(r6shoe) + np.array(r8shoe) +
np.array(r10shoe)) / 5
oddShoe = (np.array(r1shoe) + np.array(r3shoe) +
np.array(r5shoe) + np.array(r7shoe) +
np.array(r9shoe) + np.array(r11shoe)) / 6
evenBottle = (np.array(r2bottle) + np.array(r4bottle) +
np.array(r6bottle) + np.array(r8bottle) +
np.array(r10bottle)) / 5
oddBottle = (np.array(r1bottle) + np.array(r3bottle) +
np.array(r5bottle) + np.array(r7bottle) +
np.array(r9bottle) + np.array(r11bottle)) / 6
evenScissor = (np.array(r2scissor) + np.array(r4scissor) +
np.array(r6scissor) + np.array(r8scissor) +
np.array(r10scissor)) / 5
oddScissor = (np.array(r1scissor) + np.array(r3scissor) +
np.array(r5scissor) + np.array(r7scissor) +
np.array(r9scissor) + np.array(r11scissor)) / 6
evenChair = (np.array(r2chair) + np.array(r4chair) +
np.array(r6chair) + np.array(r8chair) +
np.array(r10chair)) / 5
oddChair = (np.array(r1chair) + np.array(r3chair) +
np.array(r5chair) + np.array(r7chair) +
np.array(r9chair) + np.array(r11chair)) / 6
evenFace = (np.array(r2face) + np.array(r4face) +
np.array(r6face) + np.array(r8face) +
np.array(r10face)) / 5
oddFace = (np.array(r1face) + np.array(r3face) +
np.array(r5face) + np.array(r7face) +
np.array(r9face) + np.array(r11face)) / 6
evenRun = [evenBottle, evenCat, evenChair, evenFace,
evenHouse, evenScissor, evenScram, evenShoe]
oddRun = [oddBottle, oddCat, oddChair, oddFace,
oddHouse, oddScissor, oddScram, oddShoe]
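# Split-half correlation: each cell (i, j) below correlates the even-run
# average time course of category i with the odd-run average of category j.
# Diagonal values that are large relative to the off-diagonal ones indicate
# that the category-specific response pattern is reproducible across runs.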
all_results = [0.0] * 64
all_results = np.reshape(all_results, (8, 8))
for i in range(8):
for j in range(8):
all_results[i, j] = np.corrcoef(evenRun[i], oddRun[j])[0, 1]
object_list = ["bottle", "cat", "chair", "face",
"house", "scissor", "scram", "shoe"]
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=all_results.round(4), colLabels=object_list,
rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.3, 0.85, "Correlation of TSA brain images of %s" % subid,
weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "subtracted_correlation_table_%s.png" % subid)
plt.close() | bsd-3-clause |
z01nl1o02/tests | ocrft/6showproto.py | 1 | 1140 | import os,sys,pdb,cPickle
import numpy as np
from sklearn.cluster import KMeans
from toshape import SHAPE_FEAT
import math
import cv2
def load_one(infile):
with open(infile, 'rb') as f:
strokes = cPickle.load(f)
return strokes
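# Each pickled stroke appears to be an (x, y, orientation) triple, with the
# position stored as a fraction of the image size (relative to the centre) and
# the orientation as a fraction of pi; run() below rescales these into pixel
# coordinates and radians before drawing.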
def run(indir,outdir,w = 32*3,h = 64*3):
try:
os.makedirs(outdir)
except Exception,e:
print e
pass
folderset = set([])
sft = SHAPE_FEAT(False,8*3,8*3)
for pkl in os.listdir(indir):
if os.path.splitext(pkl)[1] != '.pkl':
continue
strokes = load_one( os.path.join(indir,pkl))
cid = os.path.splitext(pkl)[0]
img = np.zeros((h * 2,w * 2),np.uint8)
shape_resized = []
for stroke in strokes:
cx = np.int64(stroke[0] * w + w/2)
cy = np.int64(stroke[1] * h + h/2)
ori = stroke[2] * math.pi
shape_resized.append( (cx,cy,ori) )
img = sft.draw_shape(shape_resized,img)
outpath = os.path.join(outdir,'proto%s.jpg'%(cid))
cv2.imwrite(outpath,img)
if __name__=="__main__":
run('proto','show')
| gpl-2.0 |
sandeepkbhat/pylearn2 | pylearn2/sandbox/cuda_convnet/specialized_bench.py | 44 | 3906 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
filters2 = shared(base_filters_value, name='filters')
layer_2_detector = FilterActs()(images, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
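# bench() performs three untimed warm-up calls and then reports the mean
# wall-clock time per call over `trials` runs.  get_speedup() below is the
# ratio baseline_time / cuda_convnet_time, so values above 1 mean the
# cuda-convnet path is faster than the reference conv2d implementation.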
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
| bsd-3-clause |
GaZ3ll3/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
    .. math:: X(e^{j\omega}) = \sum_{n=-\infty}^{\infty} x(n) e^{-j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
    a
    <BLANKLINE>
    b
"""
pass
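def bar(x):
    """Double a number.
    A hypothetical second example (not part of the original guide) that
    follows the minimal docstring layout described above.
    Parameters
    ----------
    x : int
        The value to double.
    Returns
    -------
    int
        Twice `x`.
    Examples
    --------
    >>> bar(2)
    4
    """
    return 2 * x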
| bsd-3-clause |
ctorney/socialInfluence | py_analysis/1D_model.py | 1 | 3065 |
#!/usr/bin/python
import sympy as sp
from IPython.display import display
import numpy as np
import matplotlib.pyplot as plt
import math as m
import matplotlib as mpl
K = 80
wg = 0.3
ws = 0.514
alpha = 0.0
NA = 64.0
N = 1024
def psw( j ):
gc = np.log(ws/(1-ws))*(K-2*j)/(4*wg)
return 0.5 + 0.5*m.erf(m.sqrt(wg)*(1.0-gc))
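# psw(j): probability that a focal individual switches given j of its K
# neighbours have already switched.  The threshold gc shrinks as j grows
# (ws > 0.5 makes log(ws/(1-ws)) positive), so psw rises towards 1 as more
# neighbours commit.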
def tup( xx ):
return psw(K*xx)
def tdown( x,xx ):
return (x) * (1 - psw(K*xx))
numX = 64
Xs = np.zeros(numX+1)
PAB = np.zeros(numX+1)
thGridup=np.zeros(numX+1)
thGriddown=np.zeros(numX+1)
a=np.loadtxt("/home/ctorney/data.txt")
for p in np.unique(a[:,1]): print p, psw(K*np.mean(a[a[:,1]==p,2]))
l = 20
lf = 2.0*(1.0-m.exp(-l*0.5))
for fX in range(32,33):#0,numX+1):
X = fX/float(numX)
ex_up=0.0
ds = int(m.floor(0.5*(numX-fX)))
for d in range(ds):
dist = (d+0.0)/numX#0.5*(1.0-X)*float(av)/100.0
if dist<0.5-X:
f = (1.0/lf)*(m.exp(-l*dist) - m.exp(-l*(dist+X)))
print [X, dist, f, tup(f)]
elif dist<1.0-X/2.0:
f = (1.0/lf)*(m.exp(-l*dist) + m.exp(-l*(1.0-dist-X)) - 2.0*m.exp(-l*(0.5)))
print [X, dist, f, tup(f)]
ex_up+=2.0*tup(f)
if (numX-fX)%2>0:
dist = (ds +0.0)/numX
if dist<0.5-X:
f = (1.0/lf)*(m.exp(-l*dist) - m.exp(-l*(dist+X)))
print [X, dist, f, tup(f)]
elif dist<1.0-X/2.0:
f = (1.0/lf)*(m.exp(-l*dist) + m.exp(-l*(1.0-dist-X)) - 2.0*m.exp(-l*(0.5)))
print [X, dist, f, tup(f)]
ex_up+=tup(f)
if X<1:
ex_up=ex_up/float(numX-fX)
Xs[fX] = X
PAB[fX] = ((f*(X)/(f*X+(1.0-X))))
thGridup[fX] = (1-X)*ex_up#tup(X,PAB[fX])
thGriddown[fX] = tdown(X,PAB[fX])
plt.plot(Xs,thGridup,marker='o',label='theory up')
for p in thGridup: print p
#
#l = 0.0001
#
#lf = 2.0*(1.0-m.exp(-l*0.5))
#for fX in range(0,65):#0,numX+1):
# X = fX/float(numX)
# ex_up=0.0
# for av in range(100):
# dist = 0.5*(1.0-X)*float(av)/100.0
#
# if dist<0.5-X:
# f = (1.0/lf)*(m.exp(-l*dist) - m.exp(-l*(dist+X)))
# #print [X, dist, f]
# elif dist<1.0-X/2.0:
# f = (1.0/lf)*(m.exp(-l*dist) + m.exp(-l*(1.0-dist-X)) - 2.0*m.exp(-l*(0.5)))
# #print [X, dist, f]
# ex_up+=tup(f)
# ex_up=ex_up/100.0
# Xs[fX] = X
# PAB[fX] = ((f*(X)/(f*X+(1.0-X))))
# thGridup[fX] = (1-X)*ex_up#tup(X,PAB[fX])
# thGriddown[fX] = tdown(X,PAB[fX])
#
#plt.plot(Xs,thGridup,marker='o',label='theory up')
##plt.plot(Xs,thGriddown,marker='o',label='theory down')
##
##
##sampleP = np.load('../potential_v2/build/potential2-0.npy')
##xGrid=np.arange(65)/64.0
##yGrid = sampleP[:,0]#np.concatenate((sampleP[0,0:64:2],sampleP[1,1:64:2]))
###plt.plot(xGrid,yGrid,marker='o',label='sim up')
##
###yGrid = np.concatenate((sampleP[0,1:64:2],sampleP[1,0:64:2]))
##yGrid = sampleP[:,1]#np.concatenate((sampleP[0,0:64:2],sampleP[1,1:64:2]))
###plt.plot(xGrid,yGrid,label='sim down')
##
###plt.plot(Xs,PAB)
###plt.plot(Xs,Xs)
##
##
##
## | mit |
louispotok/pandas | pandas/tests/test_nanops.py | 3 | 43136 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import pytest
import warnings
import numpy as np
import pandas as pd
from pandas import Series, isna
from pandas.core.dtypes.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.compat.numpy import _np_version_under1p13
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(object):
def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
self.arr_float1 = np.random.randn(*self.arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, self.arr_shape)
self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype('S')
self.arr_utf = np.abs(self.arr_float).astype('U')
self.arr_date = np.random.randint(0, 20000,
self.arr_shape).astype('M8[ns]')
self.arr_tdelta = np.random.randint(0, 20000,
self.arr_shape).astype('m8[ns]')
self.arr_nan = np.tile(np.nan, self.arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
self.arr_inf])
self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
self.arr_nan])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
self.arr_inf])
self.arr_obj = np.vstack([self.arr_float.astype(
'O'), self.arr_int.astype('O'), self.arr_bool.astype(
'O'), self.arr_complex.astype('O'), self.arr_str.astype(
'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
self.arr_tdelta.astype('O')])
with np.errstate(invalid='ignore'):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex,
self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex,
self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_complex_2d = self.arr_complex[:, :, 0]
self.arr_int_2d = self.arr_int[:, :, 0]
self.arr_bool_2d = self.arr_bool[:, :, 0]
self.arr_str_2d = self.arr_str[:, :, 0]
self.arr_utf_2d = self.arr_utf[:, :, 0]
self.arr_date_2d = self.arr_date[:, :, 0]
self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
self.arr_inf_2d = self.arr_inf[:, :, 0]
self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_complex_1d = self.arr_complex[:, 0, 0]
self.arr_int_1d = self.arr_int[:, 0, 0]
self.arr_bool_1d = self.arr_bool[:, 0, 0]
self.arr_str_1d = self.arr_str[:, 0, 0]
self.arr_utf_1d = self.arr_utf[:, 0, 0]
self.arr_date_1d = self.arr_date[:, 0, 0]
self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
self.arr_inf_1d = self.arr_inf.ravel()
self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real,
check_dtype=check_dtype)
tm.assert_almost_equal(targ.imag, res.imag,
check_dtype=check_dtype)
def check_fun_data(self, testfunc, targfunc, testarval, targarval,
targarnanval, check_dtype=True, empty_targfunc=None,
**kwargs):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
try:
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
targarnanval2, check_dtype=check_dtype,
empty_targfunc=empty_targfunc, **kwargs)
def check_fun(self, testfunc, targfunc, testar, targar=None,
targarnan=None, empty_targfunc=None, **kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc, testarval, targarval,
targarnanval, empty_targfunc=empty_targfunc,
**kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
def check_funs(self, testfunc, targfunc, allow_complex=True,
allow_all_nan=True, allow_str=True, allow_date=True,
allow_tdelta=True, allow_obj=True, **kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
**kwargs)
self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
self.arr_bool.astype('O')]
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_complex_nan',
'arr_complex', **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
objs += [self.arr_complex.astype('O')]
if allow_str:
self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
objs += [self.arr_tdelta.astype('O')]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == 'convert':
targfunc = partial(self._badobj_wrap, func=targfunc,
allow_complex=allow_complex)
self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum, allow_str=False,
allow_date=False, allow_tdelta=True, check_dtype=False,
empty_targfunc=np.nansum)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
allow_obj=False, allow_str=False, allow_date=False,
allow_tdelta=True)
def test_nanmean_overflow(self):
        # GH 10155
        # In the previous implementation the mean could overflow for int
        # dtypes; it is now consistent with numpy
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
assert result.dtype == np.float64
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, 'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
assert result.dtype == np.float64
else:
assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
@pytest.mark.parametrize('ddof', range(3))
def test_nanvar(self, ddof):
self.check_funs(nanops.nanvar, np.var, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert', ddof=ddof)
@pytest.mark.parametrize('ddof', range(3))
def test_nanstd(self, ddof):
self.check_funs(nanops.nanstd, np.std, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert', ddof=ddof)
@td.skip_if_no('scipy', min_version='0.17.0')
@pytest.mark.parametrize('ddof', range(3))
def test_nansem(self, ddof):
from scipy.stats import sem
with np.errstate(invalid='ignore'):
self.check_funs(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False, allow_obj='convert', ddof=ddof)
def _minmax_wrap(self, value, axis=None, func=None):
# numpy warns if all nan
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
with warnings.catch_warnings(record=True):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func,
allow_str=False, allow_obj=False)
def test_nanmax(self):
with warnings.catch_warnings(record=True):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func,
allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
with warnings.catch_warnings(record=True):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func,
allow_str=False, allow_obj=False,
allow_date=True, allow_tdelta=True)
def test_nanargmin(self):
with warnings.catch_warnings(record=True):
func = partial(self._argminmax_wrap, func=np.argmin)
self.check_funs(nanops.nanargmin, func, allow_str=False,
allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
@td.skip_if_no('scipy', min_version='0.17.0')
def test_nanskew(self):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
with np.errstate(invalid='ignore'):
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False)
@td.skip_if_no('scipy', min_version='0.17.0')
def test_nankurt(self):
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid='ignore'):
self.check_funs(nanops.nankurt, func, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False)
@td.skip_if_no("numpy", min_version="1.10.0")
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
allow_date=False, allow_tdelta=False,
empty_targfunc=np.nanprod)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
@td.skip_if_no_scipy
def test_nancorr_kendall(self):
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
@td.skip_if_no_scipy
def test_nancorr_spearman(self):
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ('ndim: %s' % arr_float.ndim, )
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, 'ndim', True):
try:
res0 = func(value, *args, **kwargs)
if correct:
assert res0
else:
assert not res0
except BaseException as exc:
exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
raise
if not hasattr(value, 'ndim'):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', False),
('arr_nan_nanj', False), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', False),
('arr_float_nan', False), ('arr_nan_nan', False),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__isfinite(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', True),
('arr_nan_nanj', True), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', True),
('arr_float_nan', True), ('arr_nan_nan', True),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_complex.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_int.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_bool.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_str.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_utf.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_date.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')
class TestEnsureNumeric(object):
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric('1'), 1.0)
assert np.allclose(nanops._ensure_numeric('1.1'), 1.1)
assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j)
def test_non_convertable_values(self):
pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo'))
pytest.raises(TypeError, lambda: nanops._ensure_numeric({}))
pytest.raises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(object):
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
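        # With 100,000 draws from N(0, 3), nanvar/nanstd should recover the
        # population variance of 3.0 (std of sqrt(3)) to a couple of decimal
        # places, which is what the tests below assert via check_less_precise.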
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5,
check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan,
check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(actual_variance, np.array(
[self.variance, 1.0 / 12]), check_less_precise=2)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
tm.assert_almost_equal(variance_1, var,
check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
check_less_precise=2)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
], [0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292]])
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array([[[0.13762259, 0.05619224, 0.11568816
], [0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449]],
[[0.09519783, 0.16435395, 0.05082054
], [0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163]]])
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
assert result == 0.0
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues(object):
# xref GH 11974
def setup_method(self, method):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
assert skew == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(right_tailed) > 0
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
tm.assert_almost_equal(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues(object):
# xref GH 11974
def setup_method(self, method):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
assert kurt == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(right_tailed) > 0
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
tm.assert_almost_equal(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
def test_use_bottleneck():
if nanops._BOTTLENECK_INSTALLED:
pd.set_option('use_bottleneck', True)
assert pd.get_option('use_bottleneck')
pd.set_option('use_bottleneck', False)
assert not pd.get_option('use_bottleneck')
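        # restore the original setting (use_bn is presumably saved earlier in
        # this test module, before the option is toggled)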
pd.set_option('use_bottleneck', use_bn)
@pytest.mark.parametrize("numpy_op, expected", [
(np.sum, 10),
(np.nansum, 10),
(np.mean, 2.5),
(np.nanmean, 2.5),
(np.median, 2.5),
(np.nanmedian, 2.5),
(np.min, 1),
(np.max, 4),
])
def test_numpy_ops(numpy_op, expected):
# GH8383
result = numpy_op(pd.Series([1, 2, 3, 4]))
assert result == expected
@pytest.mark.parametrize("numpy_op, expected", [
(np.nanmin, 1),
(np.nanmax, 4),
])
def test_numpy_ops_np_version_under1p13(numpy_op, expected):
# GH8383
result = numpy_op(pd.Series([1, 2, 3, 4]))
if _np_version_under1p13:
# bug for numpy < 1.13, where result is a series, should be a scalar
with pytest.raises(ValueError):
assert result == expected
else:
assert result == expected
| bsd-3-clause |
BigTone2009/sms-tools | lectures/08-Sound-transformations/plots-code/sineModelFreqScale-orchestra.py | 21 | 2666 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT
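# analysis parameters for the orchestra sound
# (the relative sound path below assumes the sms-tools repository layout)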
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
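# frequency-scaling envelope given as (normalized time, factor) pairs:
# the scaling factor ramps from 0.8 at the start to 1.2 at the end of the sound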
freqScaling = np.array([0, .8, 1, 1.2])
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)
y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y,fs, 'sineModelFreqScale-orchestra.wav')
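# plot the input sound, the original and frequency-scaled sine tracks over
# their spectrograms, and the resynthesized output sound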
maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')
plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
| agpl-3.0 |
mattgiguere/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
    # Having a larger dataset shows the behavior of the methods more clearly,
    # but we only double the size of the dataset, as the cost of the
    # hierarchical clustering methods is strongly super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
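# run agglomerative clustering with each linkage strategy on the 2D embedding,
# timing each fit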
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
kernc/scikit-learn | sklearn/tests/test_multiclass.py | 18 | 24010 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't contain all the classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs, and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
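    # e.g. a decision value of 1.8 decodes to 2 votes and a normalized
    # confidence of -0.2, which only matters for breaking ties (illustrative
    # numbers, not taken from this dataset)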
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties are not always won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate feature scores, used in the tests below to check that the best
# features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
vibhorag/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
anooptoffy/ML-Algorithms-and-Applications | Applications/Panorama/panorama.py | 2 | 5076 | import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
"""
Images for each panorama is stored in the directory structure
as show below:
data
├── Panorama
├── One
│ ├── institute11.jpg
│ └── institute21.jpg
└── Two
├── secondPic1.jpg
└── secondPic2.jpg
"""
class Panorama:
""" Panorama class for Generating Panorama images"""
def __init__(self, location):
self.location = location # image location
self.images = [] # stores the images for creating panorama
# descriptors for layout of pyplot
self.count = 1
self.m = 3
self.n = 2
    # Loads the image paths from the directory into the images list.
def load_images(self):
paths = os.listdir(self.location)
for path in range(len(paths)):
self.images.append(self.location + "/" + paths[
path])
def list_images(self):
for img in range(len(self.images)):
self.plot(self.m, self.n, self.images[img],
str("Image "+str(img + 1)))
# self.show_plot()
    # converts a BGR image to grayscale
def cvt_gray(self, image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # converts a BGR image to RGB for matplotlib display
def cvt_bgr2rgb(self, image):
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# initializes the surf descriptor
@staticmethod
def surf(hessian):
return cv2.xfeatures2d.SURF_create(hessianThreshold=hessian,
upright=True,
extended=True)
# initialize flann matcher
@staticmethod
def flann():
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
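        # a KD-tree index with 50 checks is an approximate matcher: it trades a
        # little accuracy for a large speed-up over brute-force matching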
return cv2.FlannBasedMatcher(index_params, search_params)
    # builds the panorama: detect and match features, estimate the homography,
    # then warp and stitch the two images
def create_panorama(self):
# initialize a surf detector with hessian as 400
surf = self.surf(400)
imgOne = cv2.imread(self.images[0])
imgTwo = cv2.imread(self.images[1])
grayOne = self.cvt_gray(imgOne)
grayTwo = self.cvt_gray(imgTwo)
        # extract the keypoints and descriptors for the individual images
kpOne, desOne = surf.detectAndCompute(grayOne, None)
kpTwo, desTwo = surf.detectAndCompute(grayTwo, None)
imgOneU = cv2.drawKeypoints(imgOne, kpOne, None, (0, 127,
0),
4)
imgTwoU = cv2.drawKeypoints(imgTwo, kpTwo, None, (0, 127,
0),
4)
# initialize flann matcher
flann = self.flann()
matches = flann.knnMatch(np.array(desOne), np.array(desTwo),
k=2)
        # keep only the good matches using Lowe's ratio test (0.7)
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
src_pts = np.float32([kpOne[m.queryIdx].pt for m in good])
dst_pts = np.float32([kpTwo[m.trainIdx].pt for m in good])
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
5.0)
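        # warp the first image into the plane of the second one using the
        # estimated homography, then overlay the second image on the left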
im_out = cv2.warpPerspective(imgOne, M, (
imgOne.shape[1] + imgTwo.shape[1],
imgOne.shape[0]))
im_out[0:imgTwo.shape[0], 0:imgTwo.shape[1]] = imgTwo
self.plot_img(self.m, self.n, imgOneU, "Keypoints 1")
self.plot_img(self.m, self.n, imgTwoU, "Keypoints 2")
img3 = cv2.drawMatchesKnn(imgOne, kpOne, imgTwo, kpTwo,
matches[:100], None,
matchColor=(0, 127, 255), flags=2)
self.plot_img(self.m, self.n, img3, "Matching Keypoints")
self.plot_img(self.m, self.n, im_out, "Panorama")
self.show_plot()
def show_plot(self):
# plt.show()
# save the Panorama created in to the disk
plt.savefig("Panorama" + str(self.count) + ".png",
bbox_inches="tight", dpi=200)
# plot the image
def plot_img(self, m, n, image, label):
img = self.cvt_bgr2rgb(image)
plt.subplot(m, n, self.count), plt.imshow(img), plt.xticks([
]), \
plt.yticks([])
plt.xlabel(label)
self.count += 1
    # reads an image from a path and plots it
def plot(self, m, n, image, label):
img = self.cvt_bgr2rgb(cv2.imread(image))
plt.subplot(m, n, self.count), plt.imshow(img), plt.xticks([
]), \
plt.yticks([])
plt.xlabel(label)
self.count += 1
def main():
# creates an instance of the Panorama class
instance = Panorama("data/Panorama/One")
instance.load_images()
instance.list_images()
instance.create_panorama()
if __name__ == '__main__':
main()
| gpl-3.0 |
wazeerzulfikar/scikit-learn | sklearn/tree/tests/test_tree.py | 7 | 64758 | """
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
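# the toy problem is separable by the sign of either feature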
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
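# pre-compute a sparse (CSC) copy of every dataset so that the dense and
# sparse code paths can be compared against each other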
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=5000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
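    # (for a binary 0/1 target, the gini impurity 2*p*(1-p) is exactly twice
    # the node variance p*(1-p), so both criteria rank candidate splits
    # identically)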
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=3.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=2.5).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_impurity_decrease=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight *
est.min_weight_fraction_leaf), 5),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf)),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
def test_min_weight_fraction_leaf_with_min_samples_leaf():
# Check on dense input
for name in ALL_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "iris")
# Check on sparse input
for name in SPARSE_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that leaf nodes end up with zero impurity when
        # min_impurity_split is left at its default
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_true(est.min_impurity_split is None,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
try:
assert_warns(DeprecationWarning, est.fit, X, y)
except AssertionError:
pass
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using
# min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
assert_warns_message(DeprecationWarning,
"Use the min_impurity_decrease",
est.fit, X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_min_impurity_decrease():
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=10000, random_state=42)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# Check default value of min_impurity_decrease, 1e-7
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.05, random_state=0)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.0001, random_state=0)
        # Check with a higher value of 0.1
est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.1, random_state=0)
for est, expected_decrease in ((est1, 1e-7), (est2, 0.05),
(est3, 0.0001), (est4, 0.1)):
assert_less_equal(est.min_impurity_decrease, expected_decrease,
"Failed, min_impurity_decrease = {0} > {1}"
.format(est.min_impurity_decrease,
expected_decrease))
est.fit(X, y)
for node in range(est.tree_.node_count):
# If current node is a not leaf node, check if the split was
# justified w.r.t the min_impurity_decrease
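                # i.e. check that the realised decrease
                #   n_node / N * (impurity_parent
                #                 - n_left / n_node * impurity_left
                #                 - n_right / n_node * impurity_right)
                # is at least the configured min_impurity_decrease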
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0])
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp)
assert_greater_equal(actual_decrease, expected_decrease,
"Failed with {0} "
"expected min_impurity_decrease={1}"
.format(actual_decrease,
expected_decrease))
def test_pickle():
    for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_behaviour_constant_feature_after_splits():
X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]],
np.zeros((4, 11)))))
y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]
for name, TreeEstimator in ALL_TREES.items():
# do not check extra random trees
if "ExtraTree" not in name:
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 2)
assert_equal(est.tree_.node_count, 5)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Save testing time by subsampling the larger datasets
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree_type, dataset in product(SPARSE_TREES, ("clf_small", "toy",
"digits", "multilabel",
"sparse-pos",
"sparse-neg",
"sparse-mix", "zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree_type, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree_type, dataset in product(SPARSE_TREES, ["boston", "reg_small"]):
if tree_type in REG_TREES:
yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_parameters, tree_type, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_criterion, tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
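    # data/indices/indptr now describe the matrix column by column; the same
    # arrays build X_sparse as CSC below and, reinterpreted row by row, the
    # distinct CSR test matrix X_sparse_test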
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree_type in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree_type)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
[0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
def test_criterion_copy():
# Let's check whether copy of our criterion has the same type
# and properties as original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_equal(n_samples, n_samples_)
| bsd-3-clause |
SanPen/GridCal | src/GridCal/Gui/Main/matplotlibwidget.py | 4 | 7192 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from PySide2.QtWidgets import *
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Navigationtoolbar
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
class MplCanvas(FigureCanvas):
def __init__(self):
self.press = None
self.cur_xlim = None
self.cur_ylim = None
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.xpress = None
self.ypress = None
self.zoom_x_limits = None
self.zoom_y_limits = None
self.fig = Figure()
try:
self.ax = self.fig.add_subplot(111, facecolor='white')
except Exception as ex:
self.ax = self.fig.add_subplot(111, axisbg='white')
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
scale = 1.2
f = self.zoom_factory(self.ax, base_scale=scale)
# p = self.pan_factory(self.ax)
self.dragged = None
self.element_dragged = None
self.pick_pos = (0, 0)
self.is_point = False
self.index = None
# Connect events and callbacks
# self.fig.canvas.mpl_connect("pick_event", self.on_pick_event)
# self.fig.canvas.mpl_connect("button_release_event", self.on_release_event)
def setTitle(self, text):
"""
Sets the figure title
"""
self.fig.suptitle(text)
def set_graph_mode(self):
"""
Sets the borders to nicely display graphs
"""
self.fig.subplots_adjust(left=0, bottom=0, right=1, top=0.9, wspace=0, hspace=0)
def zoom_factory(self, ax, base_scale=1.2):
"""
Mouse zoom handler
"""
def zoom(event):
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'down':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'up':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
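            # relx/rely give the cursor position relative to the current view;
            # scaling the limits around that anchor keeps the data point under
            # the cursor fixed while zooming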
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
self.zoom_x_limits = [xdata - new_width * (1-relx), xdata + new_width * relx]
self.zoom_y_limits = [ydata - new_height * (1-rely), ydata + new_height * rely]
            ax.set_xlim(self.zoom_x_limits)
ax.set_ylim(self.zoom_y_limits)
ax.figure.canvas.draw()
fig = ax.get_figure() # get the figure of interest
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom
def rec_zoom(self):
self.zoom_x_limits = self.ax.get_xlim()
self.zoom_y_limits = self.ax.get_ylim()
def set_last_zoom(self):
if self.zoom_x_limits is not None:
            self.ax.set_xlim(self.zoom_x_limits)
self.ax.set_ylim(self.zoom_y_limits)
def pan_factory(self, ax):
"""
Mouse pan handler
"""
def onPress(event):
if event.inaxes != ax:
return
self.cur_xlim = ax.get_xlim()
self.cur_ylim = ax.get_ylim()
self.press = self.x0, self.y0, event.xdata, event.ydata
self.x0, self.y0, self.xpress, self.ypress = self.press
def onRelease(event):
self.press = None
ax.figure.canvas.draw()
def onMotion(event):
if self.press is None:
return
if event.inaxes != ax:
return
dx = event.xdata - self.xpress
dy = event.ydata - self.ypress
self.cur_xlim -= dx
self.cur_ylim -= dy
ax.set_xlim(self.cur_xlim)
ax.set_ylim(self.cur_ylim)
ax.figure.canvas.draw()
fig = ax.get_figure() # get the figure of interest
# attach the call back
fig.canvas.mpl_connect('button_press_event', onPress)
fig.canvas.mpl_connect('button_release_event', onRelease)
fig.canvas.mpl_connect('motion_notify_event', onMotion)
# return the function
return onMotion
class MatplotlibWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.frame = QWidget()
self.canvas = MplCanvas()
self.canvas.setParent(self.frame)
self.mpltoolbar = Navigationtoolbar(self.canvas, self.frame)
self.vbl = QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.vbl.addWidget(self.mpltoolbar)
self.setLayout(self.vbl)
self.mpltoolbar.toggleViewAction()
def setTitle(self, text):
"""
Sets the figure title
"""
self.canvas.setTitle(text)
def get_axis(self):
return self.canvas.ax
def get_figure(self):
return self.canvas.fig
def clear(self, force=False):
"""
Clear the interface
Args:
force: Remove the object and create a new one (brute force)
Returns:
"""
if force:
self.canvas.fig.clear()
self.canvas.ax = self.canvas.fig.add_subplot(111)
# self.canvas.ax.clear()
# self.canvas = MplCanvas()
else:
self.canvas.ax.clear()
self.redraw()
def redraw(self):
"""
Redraw the interface
Returns:
"""
self.canvas.ax.figure.canvas.draw()
def plot(self, x, y, title='', xlabel='', ylabel=''):
"""
Plot series
Args:
x: X values
y: Y values
title: Title
xlabel: Label for X
ylabel: Label for Y
Returns:
"""
self.setTitle(title)
self.canvas.ax.plot(x, y)
self.canvas.ax.set_xlabel(xlabel)
self.canvas.ax.set_ylabel(ylabel)
self.redraw()
| gpl-3.0 |
chilang/zeppelin | python/src/main/resources/python/zeppelin_python.py | 7 | 9381 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError
import warnings
import ast
import traceback
import warnings
import signal
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# for back compatibility
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
def __init__(self, z):
self.z = z
self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
self.javaList = gateway.jvm.java.util.ArrayList
self.max_result = 1000
self._displayhook = lambda *args: None
self._setup_matplotlib()
def getInterpreterContext(self):
return self.z.getCurrentInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.getGui().input(name, defaultValue)
def select(self, name, options, defaultValue=""):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return self.z.getGui().select(name, defaultValue, javaOptions)
def checkbox(self, name, options, defaultChecked=[]):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
javaDefaultCheck = self.javaList()
for check in defaultChecked:
javaDefaultCheck.append(check)
return self.z.getGui().checkbox(name, javaDefaultCheck, javaOptions)
def show(self, p, **kwargs):
if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
self.show_matplotlib(p, **kwargs)
elif type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
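    # Typical usage from a note (illustrative sketch): z.show(plt) after
    # drawing a matplotlib figure, or z.show(df) to render a pandas
    # DataFrame with the table display system.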
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
**kwargs):
"""Matplotlib show function
"""
if fmt == "png":
img = BytesIO()
p.savefig(img, format=fmt)
img_str = b"data:image/png;base64,"
img_str += base64.b64encode(img.getvalue().strip())
img_tag = "<img src={img} style='width={width};height:{height}'>"
            # Decoding is necessary for Python 3 compatibility
img_str = img_str.decode("ascii")
img_str = img_tag.format(img=img_str, width=width, height=height)
elif fmt == "svg":
img = StringIO()
p.savefig(img, format=fmt)
img_str = img.getvalue()
else:
raise ValueError("fmt must be 'png' or 'svg'")
html = "%html <div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img_str))
img.close()
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72,
fontsize=10, interactive=True, format='png')
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def handler_stop_signals(sig, frame):
sys.exit("Got signal : " + str(sig))
signal.signal(signal.SIGINT, handler_stop_signals)
host = "127.0.0.1"
if len(sys.argv) >= 3:
host = sys.argv[2]
_zcUserQueryNameSpace = {}
client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
z = __zeppelin__ = PyZeppelinContext(intp)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z
output = Logger()
sys.stdout = output
#sys.stderr = output
while True:
req = intp.getStatements()
if req == None:
break
try:
stmts = req.statements().split("\n")
final_code = []
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
for s in stmts:
if s == None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
final_code.append(s)
if final_code:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
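            # Split the AST body into three groups: the statements before the
            # last user statement (compiled in 'exec' mode), the last user
            # statement (compiled in 'single' mode so its value is echoed to
            # stdout), and any trailing post-execute hooks.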
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
except:
raise Exception(traceback.format_exc())
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except Py4JNetworkError:
# lost connection from gateway server. exit
sys.exit(1)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
piyush0609/scipy | scipy/signal/wavelets.py | 6 | 10472 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p coefficients of the low-pass FIR filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
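# Illustrative pairing (added sketch): a low-pass filter and its qmf
# counterpart are orthogonal, e.g.
#     h = daub(2)
#     g = qmf(h)
#     np.allclose(np.dot(h, g), 0.0)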
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray
        The wavelet function ``psi(x)`` at `x`:
        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N and `gk`
        is the high-pass (qmf) filter derived from `hk`.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
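# Usage sketch (added for illustration):
#     x, phi, psi = cascade(daub(3), J=7)
# phi and psi sample the Daubechies-3 scaling and wavelet functions on the
# dyadic grid x.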
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
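# Usage sketch (added for illustration):
#     wav = morlet(1000, w=5.0, s=1.0)
# wav is a complex array: an oscillation at omega0 = w under a Gaussian
# envelope, windowed from -s*2*pi to +s*2*pi.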
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
ibis-project/ibis | ibis/backends/postgres/tests/test_functions.py | 1 | 47205 | import operator
import os
import string
import warnings
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import sqlalchemy as sa
from pytest import param
from sqlalchemy.dialects import postgresql
import ibis
import ibis.config as config
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
from ibis.expr.window import rows_with_max_lookback
pytestmark = pytest.mark.postgres
@pytest.fixture
def guid(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.fixture
def guid2(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
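# Both fixtures hand the test a unique table name and drop that table again
# once the test finishes, so repeated runs do not collide.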
@pytest.mark.parametrize(
('left_func', 'right_func'),
[
param(
lambda t: t.double_col.cast('int8'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int8',
),
param(
lambda t: t.double_col.cast('int16'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int16',
),
param(
lambda t: t.string_col.cast('double'),
lambda at: sa.cast(at.c.string_col, postgresql.DOUBLE_PRECISION),
id='string_to_double',
),
param(
lambda t: t.string_col.cast('float'),
lambda at: sa.cast(at.c.string_col, sa.REAL),
id='string_to_float',
),
param(
lambda t: t.string_col.cast('decimal'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 0)),
id='string_to_decimal_no_params',
),
param(
lambda t: t.string_col.cast('decimal(9, 3)'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 3)),
id='string_to_decimal_params',
),
],
)
def test_cast(alltypes, at, translate, left_func, right_func):
left = left_func(alltypes)
right = right_func(at)
assert str(translate(left).compile()) == str(right.compile())
def test_date_cast(alltypes, at, translate):
result = alltypes.date_string_col.cast('date')
expected = sa.cast(at.c.date_string_col, sa.DATE)
assert str(translate(result)) == str(expected)
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, at, translate, column):
col = alltypes[column]
result = col.cast(col.type())
expected = at.c[column]
assert result.equals(col)
assert str(translate(result)) == str(expected)
def test_timestamp_cast_noop(alltypes, at, translate):
# See GH #592
result1 = alltypes.timestamp_col.cast('timestamp')
result2 = alltypes.int_col.cast('timestamp')
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
expected1 = at.c.timestamp_col
expected2 = sa.func.timezone('UTC', sa.func.to_timestamp(at.c.int_col))
assert str(translate(result1)) == str(expected1)
assert str(translate(result2)) == str(expected2)
@pytest.mark.parametrize(
('func', 'expected'),
[
param(operator.methodcaller('year'), 2015, id='year'),
param(operator.methodcaller('month'), 9, id='month'),
param(operator.methodcaller('day'), 1, id='day'),
param(operator.methodcaller('hour'), 14, id='hour'),
param(operator.methodcaller('minute'), 48, id='minute'),
param(operator.methodcaller('second'), 5, id='second'),
param(operator.methodcaller('millisecond'), 359, id='millisecond'),
param(lambda x: x.day_of_week.index(), 1, id='day_of_week_index'),
param(
lambda x: x.day_of_week.full_name(),
'Tuesday',
id='day_of_week_full_name',
),
],
)
def test_simple_datetime_operations(con, func, expected, translate):
value = ibis.timestamp('2015-09-01 14:48:05.359')
assert con.execute(func(value)) == expected
@pytest.mark.parametrize(
'pattern',
[
# there could be pathological failure at midnight somewhere, but
# that's okay
'%Y%m%d %H',
# test quoting behavior
'DD BAR %w FOO "DD"',
'DD BAR %w FOO "D',
'DD BAR "%w" FOO "D',
'DD BAR "%d" FOO "D',
param(
'DD BAR "%c" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
param(
'DD BAR "%x" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
param(
'DD BAR "%X" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
],
)
def test_strftime(con, pattern):
value = ibis.timestamp('2015-09-01 14:48:05.359')
raw_value = datetime(
year=2015,
month=9,
day=1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
assert con.execute(value.strftime(pattern)) == raw_value.strftime(pattern)
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(operator.add, L(3), L(4), 7, id='add'),
param(operator.sub, L(3), L(4), -1, id='sub'),
param(operator.mul, L(3), L(4), 12, id='mul'),
param(operator.truediv, L(12), L(4), 3, id='truediv_no_remainder'),
param(operator.pow, L(12), L(2), 144, id='pow'),
param(operator.mod, L(12), L(5), 2, id='mod'),
param(operator.truediv, L(7), L(2), 3.5, id='truediv_remainder'),
param(operator.floordiv, L(7), L(2), 3, id='floordiv'),
param(
lambda x, y: x.floordiv(y), L(7), 2, 3, id='floordiv_no_literal'
),
param(
lambda x, y: x.rfloordiv(y), L(2), 7, 3, id='rfloordiv_no_literal'
),
],
)
def test_binary_arithmetic(con, func, left, right, expected):
expr = func(left, right)
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
param(L('foo_bar'), 'text', id='text'),
param(L(5), 'integer', id='integer'),
param(ibis.NA, 'null', id='null'),
# TODO(phillipc): should this really be double?
param(L(1.2345), 'numeric', id='numeric'),
param(
L(
datetime(
2015,
9,
1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
),
'timestamp without time zone',
id='timestamp_without_time_zone',
),
param(L(date(2015, 9, 1)), 'date', id='date'),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
assert con.execute(L(value).nullifzero()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
param(operator.methodcaller('left', 3), 'foo', id='left'),
param(operator.methodcaller('right', 3), 'bar', id='right'),
param(operator.methodcaller('substr', 0, 3), 'foo', id='substr_0_3'),
        param(operator.methodcaller('substr', 4, 3), 'bar', id='substr_4_3'),
param(operator.methodcaller('substr', 1), 'oo_bar', id='substr_1'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'expected'),
[('lstrip', 'foo '), ('rstrip', ' foo'), ('strip', 'foo')],
)
def test_string_strip(con, opname, expected):
op = operator.methodcaller(opname)
value = L(' foo ')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'count', 'char', 'expected'),
[('lpad', 6, ' ', ' foo'), ('rpad', 6, ' ', 'foo ')],
)
def test_string_pad(con, opname, count, char, expected):
op = operator.methodcaller(opname, count, char)
value = L('foo')
assert con.execute(op(value)) == expected
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
@pytest.mark.parametrize(
('haystack', 'needle', 'expected'),
[
('foobar', 'bar', True),
('foobar', 'foo', True),
('foobar', 'baz', False),
('100%', '%', True),
('a_b_c', '_', True),
],
)
def test_string_contains(con, haystack, needle, expected):
value = L(haystack)
expr = value.contains(needle)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('value', 'expected'),
[('foo bar foo', 'Foo Bar Foo'), ('foobar Foo', 'Foobar Foo')],
)
def test_capitalize(con, value, expected):
assert con.execute(L(value).capitalize()) == expected
def test_repeat(con):
expr = L('bar ').repeat(3)
assert con.execute(expr) == 'bar bar bar '
def test_re_replace(con):
expr = L('fudge|||chocolate||candy').re_replace('\\|{2,3}', ', ')
assert con.execute(expr) == 'fudge, chocolate, candy'
def test_translate(con):
expr = L('faab').translate('a', 'b')
assert con.execute(expr) == 'fbbb'
@pytest.mark.parametrize(
('raw_value', 'expected'), [('a', 0), ('b', 1), ('d', -1), (None, 3)]
)
def test_find_in_set(con, raw_value, expected):
value = L(raw_value, dt.string)
haystack = ['a', 'b', 'c', None]
expr = value.find_in_set(haystack)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('raw_value', 'opname', 'expected'),
[
(None, 'isnull', True),
(1, 'isnull', False),
(None, 'notnull', False),
(1, 'notnull', True),
],
)
def test_isnull_notnull(con, raw_value, opname, expected):
lit = L(raw_value)
op = operator.methodcaller(opname)
expr = op(lit)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('foobar').find('bar'), 3, id='find_pos'),
param(L('foobar').find('baz'), -1, id='find_neg'),
param(L('foobar').like('%bar'), True, id='like_left_pattern'),
param(L('foobar').like('foo%'), True, id='like_right_pattern'),
param(L('foobar').like('%baz%'), False, id='like_both_sides_pattern'),
param(L('foobar').like(['%bar']), True, id='like_list_left_side'),
param(L('foobar').like(['foo%']), True, id='like_list_right_side'),
param(L('foobar').like(['%baz%']), False, id='like_list_both_sides'),
param(
L('foobar').like(['%bar', 'foo%']), True, id='like_list_multiple'
),
param(L('foobarfoo').replace('foo', 'H'), 'HbarH', id='replace'),
param(L('a').ascii_str(), ord('a'), id='ascii_str'),
],
)
def test_string_functions(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('abcd').re_search('[a-z]'), True, id='re_search_match'),
param(L('abcd').re_search(r'[\d]+'), False, id='re_search_no_match'),
param(
L('1222').re_search(r'[\d]+'), True, id='re_search_match_number'
),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(
L('abcd').re_extract('([a-z]+)', 0), 'abcd', id='re_extract_whole'
),
param(
L('abcd').re_extract('(ab)(cd)', 1), 'cd', id='re_extract_first'
),
# valid group number but no match => empty string
param(L('abcd').re_extract(r'(\d)', 0), '', id='re_extract_no_match'),
# match but not a valid group number => NULL
param(L('abcd').re_extract('abcd', 3), None, id='re_extract_match'),
],
)
def test_regexp_extract(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.NA.fillna(5), 5, id='filled'),
param(L(5).fillna(10), 5, id='not_filled'),
param(L(5).nullif(5), None, id='nullif_null'),
param(L(10).nullif(5), 10, id='nullif_not_null'),
],
)
def test_fillna_nullif(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(5, None, 4), 5, id='first'),
param(ibis.coalesce(ibis.NA, 4, ibis.NA), 4, id='second'),
param(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14, id='third'),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(ibis.NA, ibis.NA), None, id='all_null'),
param(
ibis.coalesce(ibis.NA, ibis.NA, ibis.NA.cast('double')),
None,
id='all_nulls_with_one_cast',
),
param(
ibis.coalesce(
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
),
None,
id='all_nulls_with_all_cast',
),
],
)
def test_coalesce_all_na(con, expr, expected):
assert con.execute(expr) == expected
def test_numeric_builtins_work(alltypes, df):
expr = alltypes.double_col.fillna(0)
result = expr.execute()
expected = df.double_col.fillna(0)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'pandas_op'),
[
param(
lambda t: (t.double_col > 20).ifelse(10, -20),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
),
id='simple',
),
param(
lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
).abs(),
id='abs',
),
],
)
def test_ifelse(alltypes, df, op, pandas_op):
expr = op(alltypes)
result = expr.execute()
result.name = None
expected = pandas_op(df)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
# tier and histogram
param(
lambda d: d.bucket([0, 10, 25, 50, 100]),
lambda s: pd.cut(
s, [0, 10, 25, 50, 100], right=False, labels=False
),
id='include_over_false',
),
param(
lambda d: d.bucket([0, 10, 25, 50], include_over=True),
lambda s: pd.cut(
s, [0, 10, 25, 50, np.inf], right=False, labels=False
),
id='include_over_true',
),
param(
lambda d: d.bucket([0, 10, 25, 50], close_extreme=False),
lambda s: pd.cut(s, [0, 10, 25, 50], right=False, labels=False),
id='close_extreme_false',
),
param(
lambda d: d.bucket(
[0, 10, 25, 50], closed='right', close_extreme=False
),
lambda s: pd.cut(
s,
[0, 10, 25, 50],
include_lowest=False,
right=True,
labels=False,
),
id='closed_right',
),
param(
lambda d: d.bucket([10, 25, 50, 100], include_under=True),
lambda s: pd.cut(
s, [0, 10, 25, 50, 100], right=False, labels=False
),
id='include_under_true',
),
],
)
def test_bucket(alltypes, df, func, pandas_func):
expr = func(alltypes.double_col)
result = expr.execute()
expected = pandas_func(df.double_col).astype('category')
tm.assert_series_equal(result, expected, check_names=False)
def test_category_label(alltypes, df):
t = alltypes
d = t.double_col
bins = [0, 10, 25, 50, 100]
labels = ['a', 'b', 'c', 'd']
bucket = d.bucket(bins)
expr = bucket.label(labels)
result = expr.execute()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
result = pd.Series(pd.Categorical(result, ordered=True))
result.name = 'double_col'
expected = pd.cut(df.double_col, bins, labels=labels, right=False)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION', 'UNION'),
(True, False, 'UNION', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(alltypes, distinct1, distinct2, expected1, expected2):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = '\n'.join(
map(
lambda line: line.rstrip(), # strip trailing whitespace
str(
expr.compile().compile(compile_kwargs={'literal_binds': True})
).splitlines(),
)
)
expected = """\
WITH anon_1 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_2 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_3 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col)
(SELECT anon_1.string_col, anon_1.metric
FROM anon_1 {} SELECT anon_2.string_col, anon_2.metric
FROM anon_2) {} SELECT anon_3.string_col, anon_3.metric
FROM anon_3""".format(
expected1, expected2
)
assert str(result) == expected
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
param(
lambda t, cond: t.bool_col.count(),
lambda df, cond: df.bool_col.count(),
id='count',
),
param(
lambda t, cond: t.bool_col.any(),
lambda df, cond: df.bool_col.any(),
id='any',
),
param(
lambda t, cond: t.bool_col.all(),
lambda df, cond: df.bool_col.all(),
id='all',
),
param(
lambda t, cond: t.bool_col.notany(),
lambda df, cond: ~df.bool_col.any(),
id='notany',
),
param(
lambda t, cond: t.bool_col.notall(),
lambda df, cond: ~df.bool_col.all(),
id='notall',
),
param(
lambda t, cond: t.double_col.sum(),
lambda df, cond: df.double_col.sum(),
id='sum',
),
param(
lambda t, cond: t.double_col.mean(),
lambda df, cond: df.double_col.mean(),
id='mean',
),
param(
lambda t, cond: t.double_col.min(),
lambda df, cond: df.double_col.min(),
id='min',
),
param(
lambda t, cond: t.double_col.max(),
lambda df, cond: df.double_col.max(),
id='max',
),
param(
lambda t, cond: t.double_col.var(),
lambda df, cond: df.double_col.var(),
id='var',
),
param(
lambda t, cond: t.double_col.std(),
lambda df, cond: df.double_col.std(),
id='std',
),
param(
lambda t, cond: t.double_col.var(how='sample'),
lambda df, cond: df.double_col.var(ddof=1),
id='samp_var',
),
param(
lambda t, cond: t.double_col.std(how='pop'),
lambda df, cond: df.double_col.std(ddof=0),
id='pop_std',
),
param(
lambda t, cond: t.bool_col.count(where=cond),
lambda df, cond: df.bool_col[cond].count(),
id='count_where',
),
param(
lambda t, cond: t.double_col.sum(where=cond),
lambda df, cond: df.double_col[cond].sum(),
id='sum_where',
),
param(
lambda t, cond: t.double_col.mean(where=cond),
lambda df, cond: df.double_col[cond].mean(),
id='mean_where',
),
param(
lambda t, cond: t.double_col.min(where=cond),
lambda df, cond: df.double_col[cond].min(),
id='min_where',
),
param(
lambda t, cond: t.double_col.max(where=cond),
lambda df, cond: df.double_col[cond].max(),
id='max_where',
),
param(
lambda t, cond: t.double_col.var(where=cond),
lambda df, cond: df.double_col[cond].var(),
id='var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond),
lambda df, cond: df.double_col[cond].std(),
id='std_where',
),
param(
lambda t, cond: t.double_col.var(where=cond, how='sample'),
lambda df, cond: df.double_col[cond].var(),
id='samp_var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond, how='pop'),
lambda df, cond: df.double_col[cond].std(ddof=0),
id='pop_std_where',
),
],
)
def test_aggregations(alltypes, df, func, pandas_func):
table = alltypes.limit(100)
df = df.head(table.count().execute())
cond = table.string_col.isin(['1', '7'])
expr = func(table, cond)
result = expr.execute()
expected = pandas_func(df, cond.execute())
np.testing.assert_allclose(result, expected)
def test_not_contains(alltypes, df):
n = 100
table = alltypes.limit(n)
expr = table.string_col.notin(['1', '7'])
result = expr.execute()
expected = ~df.head(n).string_col.isin(['1', '7'])
tm.assert_series_equal(result, expected, check_names=False)
def test_group_concat(alltypes, df):
expr = alltypes.string_col.group_concat()
result = expr.execute()
expected = ','.join(df.string_col.dropna())
assert result == expected
def test_distinct_aggregates(alltypes, df):
expr = alltypes.limit(100).double_col.nunique()
result = expr.execute()
assert result == df.head(100).double_col.nunique()
def test_not_exists(alltypes, df):
t = alltypes
t2 = t.view()
expr = t[~((t.string_col == t2.string_col).any())]
result = expr.execute()
left, right = df, t2.execute()
expected = left[left.string_col != right.string_col]
tm.assert_frame_equal(
result, expected, check_index_type=False, check_dtype=False
)
def test_interactive_repr_shows_error(alltypes):
# #591. Doing this in PostgreSQL because so many built-in functions are
# not available
expr = alltypes.double_col.approx_median()
with config.option_context('interactive', True):
result = repr(expr)
assert 'no translation rule' in result.lower()
def test_subquery(alltypes, df):
t = alltypes
expr = (
t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size()
)
result = expr.execute().sort_values('string_col').reset_index(drop=True)
expected = (
df.assign(d=df.double_col.fillna(0))
.head(1000)
.groupby('string_col')
.string_col.count()
.reset_index(name='count')
.sort_values('string_col')
.reset_index(drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_simple_window(alltypes, func, df):
t = alltypes
f = getattr(t.double_col, func)
df_f = getattr(df.double_col, func)
result = (
t.projection([(t.double_col - f()).name('double_col')])
.execute()
.double_col
)
expected = df.double_col - df_f()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_rolling_window(alltypes, func, df):
t = alltypes
df = (
df[['double_col', 'timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
window = ibis.window(order_by=t.timestamp_col, preceding=6, following=0)
f = getattr(t.double_col, func)
df_f = getattr(df.double_col.rolling(7, min_periods=0), func)
result = (
t.projection([f().over(window).name('double_col')])
.execute()
.double_col
)
expected = df_f()
tm.assert_series_equal(result, expected)
def test_rolling_window_with_mlb(alltypes):
t = alltypes
window = ibis.trailing_window(
preceding=rows_with_max_lookback(3, ibis.interval(days=5)),
order_by=t.timestamp_col,
)
expr = t['double_col'].sum().over(window)
with pytest.raises(NotImplementedError):
expr.execute()
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_partitioned_window(alltypes, func, df):
t = alltypes
window = ibis.window(
group_by=t.string_col,
order_by=t.timestamp_col,
preceding=6,
following=0,
)
def roller(func):
def rolled(df):
torder = df.sort_values('timestamp_col')
rolling = torder.double_col.rolling(7, min_periods=0)
return getattr(rolling, func)()
return rolled
f = getattr(t.double_col, func)
expr = f().over(window).name('double_col')
result = t.projection([expr]).execute().double_col
expected = (
df.groupby('string_col').apply(roller(func)).reset_index(drop=True)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_simple_window(alltypes, func, df):
t = alltypes
f = getattr(t.double_col, func)
col = t.double_col - f().over(ibis.cumulative_window())
expr = t.projection([col.name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_partitioned_window(alltypes, func, df):
t = alltypes
df = df.sort_values('string_col').reset_index(drop=True)
window = ibis.cumulative_window(group_by=t.string_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - getattr(c, 'cum%s' % func)()
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values('timestamp_col').reset_index(drop=True)
window = ibis.cumulative_window(order_by=t.timestamp_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_partitioned_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values(['string_col', 'timestamp_col']).reset_index(drop=True)
window = ibis.cumulative_window(
order_by=t.timestamp_col, group_by=t.string_col
)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
method = operator.methodcaller('cum{}'.format(func))
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - method(c)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(('func', 'shift_amount'), [('lead', -1), ('lag', 1)])
def test_analytic_shift_functions(alltypes, df, func, shift_amount):
method = getattr(alltypes.double_col, func)
expr = method(1)
result = expr.execute().rename('double_col')
expected = df.double_col.shift(shift_amount)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('func', 'expected_index'), [('first', -1), ('last', 0)]
)
def test_first_last_value(alltypes, df, func, expected_index):
col = alltypes.sort_by(ibis.desc(alltypes.string_col)).double_col
method = getattr(col, func)
expr = method()
result = expr.execute().rename('double_col')
expected = pd.Series(
df.double_col.iloc[expected_index],
index=pd.RangeIndex(len(df)),
name='double_col',
)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
tm.assert_series_equal(result, pd.Series([None] * nrows, name='na_column'))
def test_null_column_union(alltypes, df):
t = alltypes
s = alltypes[['double_col']].mutate(string_col=ibis.NA.cast('string'))
expr = t[['double_col', 'string_col']].union(s)
result = expr.execute()
nrows = t.count().execute()
expected = pd.concat(
[
df[['double_col', 'string_col']],
pd.concat(
[
df[['double_col']],
pd.DataFrame({'string_col': [None] * nrows}),
],
axis=1,
),
],
axis=0,
ignore_index=True,
)
tm.assert_frame_equal(result, expected)
def test_window_with_arithmetic(alltypes, df):
t = alltypes
w = ibis.window(order_by=t.timestamp_col)
expr = t.mutate(new_col=ibis.row_number().over(w) / 2)
df = (
df[['timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
expected = df.assign(new_col=[x / 2.0 for x in range(len(df))])
result = expr['timestamp_col', 'new_col'].execute()
tm.assert_frame_equal(result, expected)
def test_anonymous_aggregate(alltypes, df):
t = alltypes
expr = t[t.double_col > t.double_col.mean()]
result = expr.execute()
expected = df[df.double_col > df.double_col.mean()].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def array_types(con):
return con.table('array_types')
def test_array_length(array_types):
expr = array_types.projection(
[
array_types.x.length().name('x_length'),
array_types.y.length().name('y_length'),
array_types.z.length().name('z_length'),
]
)
result = expr.execute()
expected = pd.DataFrame(
{
'x_length': [3, 2, 2, 3, 3, 4],
'y_length': [3, 2, 2, 3, 3, 4],
'z_length': [3, 2, 2, 0, None, 4],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('column', 'value_type'),
[('x', dt.int64), ('y', dt.string), ('z', dt.double)],
)
def test_array_schema(array_types, column, value_type):
assert array_types[column].type() == dt.Array(value_type)
def test_array_collect(array_types):
expr = array_types.group_by(array_types.grouper).aggregate(
collected=lambda t: t.scalar_column.collect()
)
result = expr.execute().sort_values('grouper').reset_index(drop=True)
expected = pd.DataFrame(
{
'grouper': list('abc'),
'collected': [[1.0, 2.0, 3.0], [4.0, 5.0], [6.0]],
}
)[['grouper', 'collected']]
tm.assert_frame_equal(result, expected, check_column_type=False)
@pytest.mark.parametrize(
['start', 'stop'],
[
(1, 3),
(1, 1),
(2, 3),
(2, 5),
(None, 3),
(None, None),
(3, None),
# negative slices are not supported
param(
-3,
None,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
None,
-3,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
-3,
-1,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
-3,
-1,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
],
)
def test_array_slice(array_types, start, stop):
expr = array_types[array_types.y[start:stop].name('sliced')]
result = expr.execute()
expected = pd.DataFrame(
{'sliced': array_types.y.execute().map(lambda x: x[start:stop])}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index', [1, 3, 4, 11])
def test_array_index(array_types, index):
expr = array_types[array_types.y[index].name('indexed')]
result = expr.execute()
expected = pd.DataFrame(
{
'indexed': array_types.y.execute().map(
lambda x: x[index] if index < len(x) else None
)
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('n', [1, 3, 4, 7, -2])
@pytest.mark.parametrize(
'mul',
[
param(lambda x, n: x * n, id='mul'),
param(lambda x, n: n * x, id='rmul'),
],
)
def test_array_repeat(array_types, n, mul):
expr = array_types.projection([mul(array_types.x, n).name('repeated')])
result = expr.execute()
expected = pd.DataFrame(
{'repeated': array_types.x.execute().map(lambda x, n=n: mul(x, n))}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'catop',
[
param(lambda x, y: x + y, id='concat'),
param(lambda x, y: y + x, id='rconcat'),
],
)
def test_array_concat(array_types, catop):
t = array_types
x, y = t.x.cast('array<string>').name('x'), t.y
expr = t.projection([catop(x, y).name('catted')])
result = expr.execute()
tuples = t.projection([x, y]).execute().itertuples(index=False)
expected = pd.DataFrame({'catted': [catop(i, j) for i, j in tuples]})
tm.assert_frame_equal(result, expected)
def test_array_concat_mixed_types(array_types):
with pytest.raises(TypeError):
array_types.x + array_types.x.cast('array<double>')
@pytest.fixture
def t(con, guid):
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
name TEXT
)
""".format(
guid
)
)
return con.table(guid)
@pytest.fixture
def s(con, t, guid, guid2):
assert t.op().name == guid
assert t.op().name != guid2
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
left_t_id INTEGER REFERENCES "{}",
cost DOUBLE PRECISION
)
""".format(
guid2, guid
)
)
return con.table(guid2)
@pytest.fixture
def trunc(con, guid):
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
name TEXT
)
""".format(
guid
)
)
con.raw_sql(
"""INSERT INTO "{}" (name) VALUES ('a'), ('b'), ('c')""".format(guid)
)
return con.table(guid)
def test_semi_join(t, s):
t_a, s_a = t.op().sqla_table.alias('t0'), s.op().sqla_table.alias('t1')
expr = t.semi_join(s, t.id == s.id)
result = expr.compile().compile(compile_kwargs={'literal_binds': True})
base = sa.select([t_a.c.id, t_a.c.name]).where(
sa.exists(sa.select([1]).where(t_a.c.id == s_a.c.id))
)
expected = sa.select([base.c.id, base.c.name])
assert str(result) == str(expected)
def test_anti_join(t, s):
t_a, s_a = t.op().sqla_table.alias('t0'), s.op().sqla_table.alias('t1')
expr = t.anti_join(s, t.id == s.id)
result = expr.compile().compile(compile_kwargs={'literal_binds': True})
base = sa.select([t_a.c.id, t_a.c.name]).where(
~sa.exists(sa.select([1]).where(t_a.c.id == s_a.c.id))
)
expected = sa.select([base.c.id, base.c.name])
assert str(result) == str(expected)
def test_create_table_from_expr(con, trunc, guid2):
con.create_table(guid2, expr=trunc)
t = con.table(guid2)
assert list(t.name.execute()) == list('abc')
def test_truncate_table(con, trunc):
assert list(trunc.name.execute()) == list('abc')
con.truncate_table(trunc.op().name)
assert not len(trunc.execute())
def test_head(con):
t = con.table('functional_alltypes')
result = t.head().execute()
expected = t.limit(5).execute()
tm.assert_frame_equal(result, expected)
def test_identical_to(con, df):
# TODO: abstract this testing logic out into parameterized fixtures
t = con.table('functional_alltypes')
dt = df[['tinyint_col', 'double_col']]
expr = t.tinyint_col.identical_to(t.double_col)
result = expr.execute()
expected = (dt.tinyint_col.isnull() & dt.double_col.isnull()) | (
dt.tinyint_col == dt.double_col
)
expected.name = result.name
tm.assert_series_equal(result, expected)
def test_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT rank() OVER (ORDER BY t0.double_col) - 1 AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
def test_percent_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.percent_rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT percent_rank() OVER (ORDER BY t0.double_col) AS "
"tmp \nFROM functional_alltypes AS t0"
)
assert result == expected
def test_ntile(con):
t = con.table('functional_alltypes')
expr = t.double_col.ntile(7)
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT ntile(7) OVER (ORDER BY t0.double_col) - 1 AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
@pytest.mark.parametrize('opname', ['invert', 'neg'])
def test_not_and_negate_bool(con, opname, df):
op = getattr(operator, opname)
t = con.table('functional_alltypes').limit(10)
expr = t.projection([op(t.bool_col).name('bool_col')])
result = expr.execute().bool_col
expected = op(df.head(10).bool_col)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'field',
[
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'year',
'month',
],
)
def test_negate_non_boolean(con, field, df):
t = con.table('functional_alltypes').limit(10)
expr = t.projection([(-t[field]).name(field)])
result = expr.execute()[field]
expected = -df.head(10)[field]
tm.assert_series_equal(result, expected)
def test_negate_boolean(con, df):
t = con.table('functional_alltypes').limit(10)
expr = t.projection([(-t.bool_col).name('bool_col')])
result = expr.execute().bool_col
expected = -df.head(10).bool_col
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('opname', 'expected'),
[
('year', {2009, 2010}),
('month', set(range(1, 13))),
('day', set(range(1, 32))),
],
)
def test_date_extract_field(db, opname, expected):
op = operator.methodcaller(opname)
t = db.functional_alltypes
expr = op(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int)
assert set(result) == expected
@pytest.mark.parametrize('opname', ['sum', 'mean', 'min', 'max', 'std', 'var'])
def test_boolean_reduction(alltypes, opname, df):
op = operator.methodcaller(opname)
expr = op(alltypes.bool_col)
result = expr.execute()
assert result == op(df.bool_col)
def test_boolean_summary(alltypes):
expr = alltypes.bool_col.summary()
result = expr.execute()
expected = pd.DataFrame(
[[7300, 0, 0, 1, 3650, 0.5, 2]],
columns=[
'count',
'nulls',
'min',
'max',
'sum',
'mean',
'approx_nunique',
],
)
type_conversions = {
'count': 'int64',
'nulls': 'int64',
'min': 'bool',
'max': 'bool',
'sum': 'int64',
'approx_nunique': 'int64',
}
for k, v in type_conversions.items():
expected[k] = expected[k].astype(v)
tm.assert_frame_equal(result, expected)
def test_timestamp_with_timezone(con):
t = con.table('tzone')
result = t.ts.execute()
assert str(result.dtype.tz)
@pytest.fixture(
params=[
None,
'UTC',
'America/New_York',
'America/Los_Angeles',
'Europe/Paris',
'Chile/Continental',
'Asia/Tel_Aviv',
'Asia/Tokyo',
'Africa/Nairobi',
'Australia/Sydney',
]
)
def tz(request):
return request.param
@pytest.fixture
def tzone_compute(con, guid, tz):
schema = ibis.schema(
[('ts', dt.Timestamp(tz)), ('b', 'double'), ('c', 'string')]
)
con.create_table(guid, schema=schema)
t = con.table(guid)
n = 10
df = pd.DataFrame(
{
'ts': pd.date_range('2017-04-01', periods=n, tz=tz).values,
'b': np.arange(n).astype('float64'),
'c': list(string.ascii_lowercase[:n]),
}
)
df.to_sql(
guid,
con.con,
index=False,
if_exists='append',
dtype={'ts': sa.TIMESTAMP(timezone=True), 'b': sa.FLOAT, 'c': sa.TEXT},
)
try:
yield t
finally:
con.drop_table(guid)
assert guid not in con.list_tables()
def test_ts_timezone_is_preserved(tzone_compute, tz):
assert dt.Timestamp(tz).equals(tzone_compute.ts.type())
def test_timestamp_with_timezone_select(tzone_compute, tz):
ts = tzone_compute.ts.execute()
assert str(getattr(ts.dtype, 'tz', None)) == str(tz)
def test_timestamp_type_accepts_all_timezones(con):
assert all(
dt.Timestamp(row.name).timezone == row.name
for row in con.con.execute('SELECT name FROM pg_timezone_names')
)
@pytest.mark.parametrize(
('left', 'right', 'type'),
[
param(L('2017-04-01'), date(2017, 4, 2), dt.date, id='ibis_date'),
param(date(2017, 4, 2), L('2017-04-01'), dt.date, id='python_date'),
param(
L('2017-04-01 01:02:33'),
datetime(2017, 4, 1, 1, 3, 34),
dt.timestamp,
id='ibis_timestamp',
),
param(
datetime(2017, 4, 1, 1, 3, 34),
L('2017-04-01 01:02:33'),
dt.timestamp,
id='python_datetime',
),
],
)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'lt', 'le', 'gt', 'ge'])
def test_string_temporal_compare(con, opname, left, right, type):
op = getattr(operator, opname)
expr = op(left, right)
result = con.execute(expr)
left_raw = con.execute(L(left).cast(type))
right_raw = con.execute(L(right).cast(type))
expected = op(left_raw, right_raw)
assert result == expected
@pytest.mark.parametrize(
('left', 'right'),
[
param(L('2017-03-31').cast(dt.date), date(2017, 4, 2), id='ibis_date'),
param(
date(2017, 3, 31), L('2017-04-02').cast(dt.date), id='python_date'
),
param(
L('2017-03-31 00:02:33').cast(dt.timestamp),
datetime(2017, 4, 1, 1, 3, 34),
id='ibis_timestamp',
),
param(
datetime(2017, 3, 31, 0, 2, 33),
L('2017-04-01 01:03:34').cast(dt.timestamp),
id='python_datetime',
),
],
)
@pytest.mark.parametrize(
'op',
[
param(
lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(
left, right
),
id='timestamp',
),
param(
lambda left, right: (
ibis.timestamp('2017-04-01').cast(dt.date).between(left, right)
),
id='date',
),
],
)
def test_string_temporal_compare_between(con, op, left, right):
expr = op(left, right)
result = con.execute(expr)
assert isinstance(result, (bool, np.bool_))
assert result
def test_scalar_parameter(con):
start = ibis.param(dt.date)
end = ibis.param(dt.date)
t = con.table('functional_alltypes')
col = t.date_string_col.cast('date')
expr = col.between(start, end)
start_string, end_string = '2009-03-01', '2010-07-03'
result = expr.execute(params={start: start_string, end: end_string})
expected = col.between(start_string, end_string).execute()
tm.assert_series_equal(result, expected)
def test_string_to_binary_cast(con):
t = con.table('functional_alltypes').limit(10)
expr = t.string_col.cast('binary')
result = expr.execute()
sql_string = (
"SELECT decode(string_col, 'escape') AS tmp "
"FROM functional_alltypes LIMIT 10"
)
raw_data = [row[0][0] for row in con.raw_sql(sql_string).fetchall()]
expected = pd.Series(raw_data, name='tmp')
tm.assert_series_equal(result, expected)
def test_string_to_binary_round_trip(con):
t = con.table('functional_alltypes').limit(10)
expr = t.string_col.cast('binary').cast('string')
result = expr.execute()
sql_string = (
"SELECT encode(decode(string_col, 'escape'), 'escape') AS tmp "
"FROM functional_alltypes LIMIT 10"
)
expected = pd.Series(
[row[0][0] for row in con.raw_sql(sql_string).fetchall()], name='tmp'
)
tm.assert_series_equal(result, expected)
| apache-2.0 |
neherlab/ffpopsim | examples/genetic_drift.py | 2 | 2160 | '''
author: Richard Neher, Fabio Zanini
date: 11/07/12
content: Example on genetic drift using haploid_highd
'''
# Import modules (setting the path should not be necessary when the module is
# installed in the PYTHONPATH)
import sys
sys.path.insert(0, '../pkg/python')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import FFPopSim as h
# specify parameters
L = 256 # simulate 256 loci
# set up population
pop = h.haploid_highd(L) # produce an instance of haploid_highd with L loci
pop.carrying_capacity = 50000 # set the average population size to 50000
pop.outcrossing_rate = 1 # make the species obligate outcrossing
pop.crossover_rate = 0.02 / pop.L # set the crossover rate of the segment to 2 centimorgans
pop.mutation_rate = 0.1 / pop.carrying_capacity # per locus mutation rate equal to 0.1/N
# initialize the population in linkage equilibrium with the specified allele frequencies
initial_allele_frequencies = 0.5*np.ones(pop.L) # define some initial allele frequencies as 1/2
pop.set_allele_frequencies(initial_allele_frequencies, pop.carrying_capacity)
# evolve for 2000 generations and track the allele frequencies
maxgen = 2000
allele_frequencies = [pop.get_allele_frequencies()]
tp = [pop.generation]
print "Illustrate genetic drift on allele frequency trajectories."
pop.status() #print status message
while pop.generation < maxgen:
if (pop.generation%(maxgen/10)==0): print pop.generation,"out of",maxgen, "generations"
pop.evolve(10)
# save allele frequencies and time
allele_frequencies.append(pop.get_allele_frequencies())
tp.append(pop.generation)
# convert to an array to enable slicing
allele_frequencies = np.array(allele_frequencies)
# plot the result
plt.figure()
for locus in xrange(5,pop.L,50): # plot a few neutral trajectories
plt.plot(tp, allele_frequencies[:,locus], c=cm.cool(locus), lw=2)
plt.title('Genetic Drift')
plt.xlabel('Time [generations]')
plt.ylabel('Allele frequencies')
plt.ion()
plt.show()
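# Optional sanity check, assuming an idealized haploid Wright-Fisher population
# of constant size N = pop.carrying_capacity: mean heterozygosity 2*p*(1-p) is
# expected to decay roughly as H_0 * (1 - 1/N)**t. The simulated population
# size fluctuates around the carrying capacity, so this is only approximate.
observed_het = np.mean(2 * allele_frequencies * (1 - allele_frequencies), axis=1)
expected_het = observed_het[0] * (1.0 - 1.0 / pop.carrying_capacity) ** np.array(tp, dtype=float)
print "Final mean heterozygosity (observed):", observed_het[-1]
print "Final mean heterozygosity (idealized drift expectation):", expected_het[-1]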
| gpl-3.0 |
JizhouZhang/SDR | gr-analog/examples/fmtest.py | 40 | 7941 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an FM demodulator, squelch block and vector sink for each of
        # the M output channels of the filter and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
dharryman/BPM_Test_Framework | Tests/Beam_Power_Dependence.py | 1 | 7974 | from pkg_resources import require
require("numpy")
require("cothread")
require("matplotlib")
import numpy as np
import matplotlib.pyplot as plt
import time
def Beam_Power_Dependence(
RFObject,
BPMObject,
frequency,
start_power=-100,
end_power=0,
samples=10,
settling_time=1,
ReportObject=None,
sub_directory=""):
"""Tests the relationship between RF output power and values read from the BPM.
An RF signal is output, and then different parameters are measured from the BPM.
The signal is linearly ramped up in dBm at a single frequency. The number of samples to take,
and settling time between each measurement can be decided using the arguments.
Args:
RFObject (RFSignalGenerator Obj): Object to interface with the RF hardware.
BPMObject (BPMDevice Obj): Object to interface with the BPM hardware.
frequency (float): Output frequency for the tests, set as a float that will
use the assumed units of MHz.
start_power (float): Starting output power for the tests, default value is
-100 dBm. The input values are floats and dBm is assumed.
end_power (float): Final output power for the tests, default value is 0 dBm.
The input values are floats and dBm assumed.
        samples (int): Number of power levels sampled between the start and
            end powers (inclusive).
settling_time (float): Time in seconds, that the program will wait in between
setting an output power on the RF, and reading the values of the BPM.
ReportObject (LaTeX Report Obj): Specific report that the test results will be recorded
to. If no report is sent to the test then it will just display the results in
a graph.
sub_directory (str): String that can change where the graphs will be saved to.
Returns:
float array: Power output from the RF
float array: Power read at the BPM
float array: Beam Current read at the BPM
float array: X Positions read from the BPM
float array: Y Positions read from the BPM
"""
intro_text = r"""Tests the relationship between RF output power and values read from the BPM.
An RF signal is output, and then different parameters are measured from the BPM.
The signal is linearly ramped up in dBm at a single frequency. The number of samples to take,
and settling time between each measurement can be decided using the arguments. \\~\\
Args:\\
RFObject (RFSignalGenerator Obj): Object to interface with the RF hardware.\\
BPMObject (BPMDevice Obj): Object to interface with the BPM hardware.\\
frequency (float): Output frequency for the tests, set as a float that will
use the assumed units of MHz. \\
start\_power (float): Starting output power for the tests, default value is
-100 dBm. The input values are floats and dBm is assumed. \\
end\_power (float): Final output power for the tests, default value is 0 dBm.
The input values are floats and dBm assumed. \\
    samples (int): Number of power levels sampled between the start and end powers (inclusive).\\
settling\_time (float): Time in seconds, that the program will wait in between
setting an output power on the RF, and reading the values of the BPM. \\
ReportObject (LaTeX Report Obj): Specific report that the test results will be recorded
to. If no report is sent to the test then it will just display the results in
a graph. \\
sub\_directory (str): String that can change where the graphs will be saved to.\\~\\
Returns:\\
float array: Power output from the RF\\
float array: Power read at the BPM\\
float array: Beam Current read at the BPM\\
float array: X Positions read from the BPM\\
float array: Y Positions read from the BPM\\~\\
"""
# Formats the test name and tells the user the test has started
test_name = __name__
test_name = test_name.rsplit("Tests.")[1]
test_name = test_name.replace("_", " ")
print("Starting test \"" + test_name + "\"")
# Get the device names for the report
device_names = []
device_names.append(RFObject.get_device_ID())
device_names.append(BPMObject.get_device_ID())
# Get the parameter values for the report
parameter_names = []
parameter_names.append("Frequency: " + str(frequency)+"MHz")
parameter_names.append("Starting output power: "+str(start_power)+"dBm")
parameter_names.append("Final output power: "+str(end_power)+"dBm")
parameter_names.append("Samples: " + str(samples))
parameter_names.append("Settling time: " + str(settling_time)+"s")
# Set the initial state of the RF device
power = np.linspace(start_power, end_power, samples) # Creates samples to test
RFObject.set_frequency(frequency)
RFObject.set_output_power(start_power)
RFObject.turn_on_RF()
time.sleep(settling_time)
# Build up the arrays where the final values will be saved
X_pos = np.array([])
Y_pos = np.array([])
beam_current = np.array([])
output_power = np.array([])
input_power = np.array([])
ADC_sum = np.array([])
# Perform the test
for index in power:
RFObject.set_output_power(index) # Set next output power value
time.sleep(settling_time) # Wait for signal to settle
beam_current = np.append(beam_current, BPMObject.get_beam_current()) # record beam current
X_pos = np.append(X_pos, BPMObject.get_X_position()) # record X pos
Y_pos = np.append(Y_pos, BPMObject.get_Y_position()) # record Y pos
output_power = np.append(output_power, RFObject.get_output_power()[0])
input_power = np.append(input_power, BPMObject.get_input_power())
ADC_sum = np.append(ADC_sum, BPMObject.get_ADC_sum())
#turn off the RF
RFObject.turn_off_RF()
# add the test details to the report
ReportObject.setup_test(test_name, intro_text, device_names, parameter_names)
# make a caption and headings for a table of results
caption = "Beam Power Dependence Results"
headings = [["Output Power", "Input Power", "BPM Current", "X Position", "Y Position", "ADC Sum"],
["(dBm)", "(dBm)", "(mA)", "(mm)", "(mm)", "(Counts)"]]
data = [output_power, input_power, beam_current, X_pos, Y_pos, ADC_sum]
# copy the values to the report
ReportObject.add_table_to_test('|c|c|c|c|c|c|', data, headings, caption)
    # Get the plot values in a format that's easy to iterate over
    format_plot = []  # x axis, y axis, x axis title, y axis title, file name
format_plot.append((output_power, input_power,'RF Source Power Output (dBm)', 'Power input at BPM (dBm)',"power_vs_power.pdf"))
format_plot.append((output_power, beam_current, 'RF Source Power Output (dBm)', 'Beam Current at BPM (mA)', "power_vs_current.pdf"))
format_plot.append((output_power, X_pos, 'RF Source Power Output (dBm)', 'Horizontal Beam Position (mm)', "power_vs_X.pdf"))
format_plot.append((output_power, Y_pos, 'RF Source Power Output (dBm)', 'Vertical Beam Position (mm)', "power_vs_Y.pdf"))
format_plot.append((output_power, ADC_sum, 'RF Source Power Output (dBm)', 'ADC Sum (counts)', 'power_vs_ADC_sum.pdf'))
# plot all of the graphs
for index in format_plot:
plt.plot(index[0], index[1])
plt.xlabel(index[2])
plt.ylabel(index[3])
plt.grid(True)
plt.savefig(sub_directory+index[4])
plt.cla() # Clear axis
plt.clf() # Clear figure
ReportObject.add_figure_to_test(sub_directory + index[4], "")
# return the full data sets
return output_power, input_power, beam_current, X_pos, Y_pos
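# Hypothetical usage sketch; the concrete RF/BPM/report class names below are
# assumptions about the surrounding framework, not definitions made here:
#
#     rf = RFSignalGenerator(...)     # any object exposing the RF interface used above
#     bpm = BPMDevice(...)            # any object exposing the BPM interface used above
#     report = LaTeXReport(...)       # optional report object
#     Beam_Power_Dependence(rf, bpm, frequency=500,
#                           start_power=-40, end_power=-20, samples=21,
#                           settling_time=1, ReportObject=report,
#                           sub_directory="./results/")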
| apache-2.0 |
joernhees/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
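# Typical usage sketch (illustrative only): the selectors exported here are
# scikit-learn transformers, e.g. univariate selection with SelectKBest:
#
#     >>> from sklearn.feature_selection import SelectKBest, chi2
#     >>> X_new = SelectKBest(chi2, k=10).fit_transform(X, y)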
| bsd-3-clause |
liyu1990/sklearn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method used for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
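# The accuracy above is precision@k computed per query and averaged over all
# queries, e.g. with k = 4:
#
#     >>> approx = np.array([3, 7, 9, 12])
#     >>> exact = np.array([3, 9, 12, 15])
#     >>> np.in1d(approx, exact).mean()
#     0.75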
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/utils/fixes.py | 39 | 13318 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
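# Overview of the backport: reduce the CSR/CSC data array per row/column
# with ufunc.reduceat, then fold in the implicit zeros of rows or columns
# that are not completely dense before returning the result.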
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
| bsd-3-clause |
mattilyra/scikit-learn | examples/classification/plot_lda.py | 142 | 2419 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
dougbenjamin/panda-harvester | pandaharvester/harvestersubmitter/act_submitter.py | 1 | 4730 | import arc
import json
import time
import urllib
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvesterconfig import harvester_config
from act.common.aCTConfig import aCTConfigARC
from act.common.aCTProxy import aCTProxy
from act.atlas.aCTDBPanda import aCTDBPanda
# logger
baseLogger = core_utils.setup_logger('act_submitter')
# submitter for aCT
class ACTSubmitter(PluginBase):
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
# Set up aCT DB connection
self.log = core_utils.make_logger(baseLogger, 'aCT submitter', method_name='__init__')
self.actDB = aCTDBPanda(self.log)
# Credential dictionary role: proxy file
self.certs = dict(zip([r.split('=')[1] for r in list(harvester_config.credmanager.voms)],
list(harvester_config.credmanager.outCertFile)))
# Map of role to aCT proxyid
self.proxymap = {}
# Get proxy info
# TODO: better to send aCT the proxy file and let it handle it
for role, proxy in self.certs.items():
cred_type = arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)
uc = arc.UserConfig(cred_type)
uc.ProxyPath(str(proxy))
cred = arc.Credential(uc)
dn = cred.GetIdentityName()
self.log.info("Proxy {0} with DN {1} and role {2}".format(proxy, dn, role))
actp = aCTProxy(self.log)
attr = '/atlas/Role='+role
proxyid = actp.getProxyId(dn, attr)
if not proxyid:
raise Exception("Proxy with DN {0} and attribute {1} was not found in proxies table".format(dn, attr))
self.proxymap[role] = proxyid
# submit workers
def submit_workers(self, workspec_list):
retList = []
for workSpec in workspec_list:
tmpLog = core_utils.make_logger(baseLogger, 'workerID={0}'.format(workSpec.workerID),
method_name='submit_workers')
queueconfigmapper = QueueConfigMapper()
queueconfig = queueconfigmapper.get_queue(workSpec.computingSite)
prodSourceLabel = queueconfig.get_source_label()
# If jobSpec is defined we are in push mode, if not pull mode
# Both assume one to one worker to job mapping
jobSpec = workSpec.get_jobspec_list()
if jobSpec:
jobSpec = jobSpec[0]
tmpLog.debug("JobSpec: {0}".format(jobSpec.values_map()))
desc = {}
desc['pandastatus'] = 'sent'
desc['actpandastatus'] = 'sent'
desc['siteName'] = workSpec.computingSite
desc['proxyid'] = self.proxymap['pilot' if prodSourceLabel == 'user' else 'production']
desc['sendhb'] = 0
metadata = {'harvesteraccesspoint': workSpec.get_access_point(),
'schedulerid': 'harvester-{}'.format(harvester_config.master.harvester_id)}
desc['metadata'] = json.dumps(metadata)
if jobSpec:
# push mode: aCT takes the url-encoded job description (like it gets from panda server)
pandaid = jobSpec.PandaID
actjobdesc = urllib.urlencode(jobSpec.jobParams)
else:
# pull mode: just set pandaid (to workerid) and prodsourcelabel
pandaid = workSpec.workerID
actjobdesc = 'PandaID=%d&prodSourceLabel=%s' % (pandaid, prodSourceLabel)
tmpLog.info("Inserting job {0} into aCT DB: {1}".format(pandaid, str(desc)))
try:
batchid = self.actDB.insertJob(pandaid, actjobdesc, desc)['LAST_INSERT_ID()']
except Exception as e:
result = (False, "Failed to insert job into aCT DB: {0}".format(str(e)))
else:
tmpLog.info("aCT batch id {0}".format(batchid))
workSpec.batchID = str(batchid)
# Set log files in workSpec
today = time.strftime('%Y-%m-%d', time.gmtime())
logurl = '/'.join([queueconfig.submitter.get('logBaseURL'), today, workSpec.computingSite, str(pandaid)])
workSpec.set_log_file('batch_log', '{0}.log'.format(logurl))
workSpec.set_log_file('stdout', '{0}.out'.format(logurl))
workSpec.set_log_file('stderr', '{0}.err'.format(logurl))
result = (True, '')
retList.append(result)
return retList
| apache-2.0 |
sameeptandon/sail-car-log | process/VideoReader.py | 1 | 4735 | # import matplotlib.pylab as pp
import cv2, cv
import os
from glob import glob
from zipfile import ZipFile
import numpy as np
class VideoReader(object):
def __new__(cls, filename, in_splits=True, num_splits=10):
if '.avi' in filename:
return AVIVideoReader(filename, in_splits, num_splits)
elif '.zip' in filename:
return ZIPVideoReader(filename)
else:
return JPEGVideoReader(filename)
class AVIVideoReader:
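# This reader appears to assume the video was pre-split into num_splits files
# named 'split_<j>_<basename>', with frames interleaved round-robin, so frame n
# is read from split file n % num_splits (see getNextFrame/setFrame below).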
def __init__(self, filename, in_splits=True, num_splits=10):
assert(in_splits) # for now
self.in_splits = in_splits
self.num_splits = num_splits
self.filename = filename
self.initReader()
self.jump = 1
def initReader(self):
path, basename = os.path.split(self.filename)
if path == '':
path = './'
self.names = [ ]
for j in range(self.num_splits):
self.names.append(path + '/' + 'split_' + str(j) + '_' + basename)
self.captures = list()
self.frame_counts = list()
for j in range(self.num_splits):
self.captures.append(cv2.VideoCapture(self.names[j]))
# NOTE CV_CAP_PROP_FRAME_COUNT is known to sometimes be inaccurate
# -> may need to use ffmpeg to estimate count
self.frame_counts.append(self.captures[-1].get(cv.CV_CAP_PROP_FRAME_COUNT))
self.framenum = 0;
self.total_frame_count = sum(self.frame_counts)
self.subsample = False
def setSubsample(self, subs):
self.subsample = subs
def getNextFrame(self):
self.jump = 10 if self.subsample else 1
self.framenum = self.framenum + self.jump;
success, img = self.captures[self.framenum % self.num_splits].read()
return (success, img)
def setFrame(self, framenum, verbose=False):
self.framenum = framenum;
for j in range(1,self.num_splits+1, self.jump):
capture_framenum = (framenum/10 + 1) if j-1 < framenum % 10 else framenum/10
self.captures[j % self.num_splits].set(cv.CV_CAP_PROP_POS_FRAMES, capture_framenum)
#self.captures[j % self.num_splits].set(cv.CV_CAP_PROP_POS_MSEC, float(capture_framenum)/0.05)
#a,b = self.captures[j % self.num_splits].read()
if (verbose):
print self.captures[j % self.num_splits].get(cv.CV_CAP_PROP_FPS)
print j, capture_framenum
print self.captures[j % self.num_splits].get(cv.CV_CAP_PROP_POS_MSEC)
print self.captures[j % self.num_splits].get(cv.CV_CAP_PROP_FRAME_COUNT)
def playVideo(self):
self.framenum = 0;
while True:
(success, img) = self.getNextFrame()
if success == False:
break;
savename = '/scail/group/deeplearning/driving_data/stillimgs/280N_right_%d.png'%(self.framenum)
img = img[:,:,::-1]
print savename
# pp.imsave(savename, img)
# cv2.imshow("video", img)
# key = cv2.waitKey(5);
class JPEGVideoReader:
def __init__(self, frame_folder):
self.framenum = 0
self.total_frame_count = len(glob(frame_folder + '/*.jpg'))
self.captures = sorted(glob(frame_folder + '/*'), key=lambda x:
int(os.path.basename(x).split('.')[0]))
def getNextFrame(self):
self.framenum += 1
return self.getFrame(self.framenum)
def getFrame(self, framenum):
self.framenum = framenum
if self.framenum < self.total_frame_count:
return (True, cv2.imread(self.captures[self.framenum - 1]))
else:
return (False, None)
def setFrame(self, framenum):
self.framenum = framenum
class ZIPVideoReader:
def __init__(self, video_zip):
self.framenum = 0
self.video_zip = video_zip
self.zip_file = ZipFile(self.video_zip, 'r')
self.total_frame_count = len(self.zip_file.infolist())
zipfiles = [zf for zf in self.zip_file.namelist() if zf.split('/')[1] != '']
self.captures = sorted(zipfiles, key=lambda x: int(os.path.basename(x).split('.')[0]))
def getNextFrame(self):
self.framenum += 1
return self.getFrame(self.framenum)
def getFrame(self, framenum):
self.framenum = framenum
if self.framenum < self.total_frame_count:
with self.zip_file.open(self.captures[self.framenum - 1]) as img_file:
imgstr = img_file.read()
img = cv2.imdecode(np.fromstring(imgstr, np.uint8),
cv2.CV_LOAD_IMAGE_COLOR)
if img is None:
# This happens sometimes
print 'Corrupt jpeg: frame number', self.framenum
return (True, img)
else:
return (False, None)
def setFrame(self, framenum):
self.framenum = framenum
def close(self):
self.zip_file.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
return isinstance(value, TypeError)
| bsd-2-clause |
tomlof/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
carthach/essentia | src/examples/python/outdated/beatogram4.py | 1 | 27500 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os, sys
from os.path import join
import essentia
from essentia.streaming import *
import essentia.standard as std
from pylab import median, mean, argmax
import matplotlib
#matplotlib.use('Agg') # in order to not grab focus on screen while batch processing
import matplotlib.pyplot as pyplot
import numpy as np
from numpy import shape, zeros, fabs
import scipy
# for key input
import termios, sys, os, subprocess
TERMIOS = termios
import copy
# for alsa
if sys.platform =='linux2':
import wave, alsaaudio
import time
import thread
barkBands = [0.0, 50.0, 100.0, 150.0, 200.0, 300.0, 400.0, 510.0, 630.0, 770.0,
920.0, 1080.0, 1270.0, 1480.0, 1720.0, 2000.0, 2320.0, 2700.0,
3150.0, 3700.0, 4400.0, 5300.0, 6400.0, 7700.0,
9500.0, 12000.0, 15500.0, 20500.0, 27000.0]
scheirerBands = [ 0.0, 50.0, 100.0, 150.0, 200.0, 400.0, 800.0, 1600.0, 3200.0, 5000.0, 10000.0]
EqBands = [20.0, 150.0, 400.0, 3200.0, 7000.0, 22000.0]
EqBands2 =[0.0, 75.0, 150.0, 400.0, 3200.0, 7000.0]
DOWNMIX ='mix'
# defines for novelty curve:
FRAMESIZE = 1024
HOPSIZE = FRAMESIZE/2
WEIGHT='flat' #'supplied' #'flat'
SAMPLERATE=44100.0
WINDOW='hann' #'blackmanharris92'
BEATWINDOW=16 # number of beats where to compute statistics
# tempogram defines:
FRAMERATE = float(SAMPLERATE)/HOPSIZE
TEMPO_FRAMESIZE = 4;
TEMPO_OVERLAP=2;
STARTTIME = 0
ENDTIME = 2000
def computeOnsets(filename, pool):
loader = EasyLoader(filename=filename,
sampleRate=pool['samplerate'],
startTime=STARTTIME, endTime=ENDTIME,
downmix=pool['downmix'])
onset = OnsetRate()
loader.audio >> onset.signal
onset.onsetTimes >> (pool, 'ticks')
onset.onsetRate >> None
essentia.run(loader)
pool.set('size', loader.audio.totalProduced())
pool.set('length', pool['size']/pool['samplerate'])
def computeSegmentation(filename, pool):
sampleRate = 44100
frameSize = 2048
hopSize = frameSize/2
audio = EqloudLoader(filename = filename,
downmix=pool['downmix'],
sampleRate=sampleRate)
fc = FrameCutter(frameSize=frameSize, hopSize=hopSize, silentFrames='keep')
w = Windowing(type='blackmanharris62')
spec = Spectrum()
mfcc = MFCC(highFrequencyBound=8000)
tmpPool = essentia.Pool()
audio.audio >> fc.signal
fc.frame >> w.frame >> spec.frame
spec.spectrum >> mfcc.spectrum
mfcc.bands >> (tmpPool, 'mfcc_bands')
mfcc.mfcc>> (tmpPool, 'mfcc_coeff')
essentia.run(audio)
# compute transpose of features array, don't call numpy.matrix.transpose
# because essentia fucks it up!!
features = copy.deepcopy(tmpPool['mfcc_coeff'].transpose())
segments = std.SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50)(features)
for segment in segments:
pool.add('segments', segment*hopSize/sampleRate)
#print pool['segments']
def computeNoveltyCurve(filename, pool):
loader = EasyLoader(filename=filename,
sampleRate=pool['samplerate'],
startTime=STARTTIME, endTime=ENDTIME,
downmix=pool['downmix'])
fc = FrameCutter(frameSize=int(pool['framesize']),
silentFrames ='noise',
hopSize=int(pool['hopsize']),
startFromZero=False)
window = Windowing(type=pool['window'],
zeroPhase=False)
#freqBands = FrequencyBands(frequencyBands=EqBands, sampleRate=pool['samplerate'])
freqBands = FrequencyBands(sampleRate=pool['samplerate'])
spec = Spectrum()
hfc = HFC()
loader.audio >> fc.signal
fc.frame >> window.frame >> spec.frame
spec.spectrum >> freqBands.spectrum
spec.spectrum >> hfc.spectrum
freqBands.bands >> (pool, 'frequency_bands')
hfc.hfc >> (pool, 'hfc')
essentia.run(loader)
pool.set('size', loader.audio.totalProduced())
pool.set('length', pool['size']/pool['samplerate'])
# compute a weighting curve that is according to frequency bands:
frequencyBands = pool['frequency_bands']
nFrames = len(frequencyBands)
weightCurve= np.sum(frequencyBands, axis=0)
weightCurve = [val/float(nFrames) for val in weightCurve]
weightCurve = essentia.normalize(weightCurve)
#pyplot.plot(weightCurve)
#pyplot.show()
noveltyCurve = std.NoveltyCurve(frameRate=pool['framerate'],
weightCurveType=pool['weight'],
weightCurve=weightCurve,
normalize=False)(frequencyBands)
#for x in noveltyCurve: pool.add('novelty_curve', x)
#return
# derivative of hfc seems to help in finding more precise beats...
hfc = std.MovingAverage(size=int(0.1*pool['framerate']))(pool['hfc'])
hfc = normalize(hfc)
noveltyCurve = normalize(noveltyCurve)
#noveltyCurve = essentia.normalize(noveltyCurve)
dhfc = derivative(hfc)
print max(hfc), max(noveltyCurve)
for i, val in enumerate(dhfc):
if val< 0: continue
noveltyCurve[i] += 0.1*val
# low pass filter novelty curve:
env = std.Envelope(attackTime=0.001*pool['framerate'],
releaseTime=0.001*pool['framerate'])(noveltyCurve)
# apply median filter:
windowSize = 60./560.*pool['framerate'] #samples
size = len(env)
filtered = zeros(size, dtype='f4')
for i in range(size):
start = i-windowSize
if start < 0: start = 0
end = start + windowSize
if end > size:
end = size
start = size-windowSize
window = env[start:end]
filtered[i] = env[i] - np.median(window) #max(np.median(window), np.mean(window))
if filtered[i] < 0: filtered[i] = 0
#pyplot.subplot(311)
#pyplot.plot(noveltyCurve)
#pyplot.subplot(312)
#pyplot.plot(env, 'r')
#pyplot.subplot(313)
#pyplot.plot(filtered, 'g')
#pyplot.show()
#for x in noveltyCurve: pool.add('novelty_curve', x)
#for x in filtered: pool.add('novelty_curve', x)
#filtered = normalize(filtered)
pool.set('novelty_curve', filtered)
pool.set('original_novelty_curve', noveltyCurve)
def normalize(array):
maxVal = max(array)
if maxVal == 0: return zeros(len(array))
return array/maxVal
def derivative(array):
return scipy.diff(array)
def computeBeats(filename, pool):
computeNoveltyCurve(filename, pool)
recompute = True
novelty = pool['novelty_curve']
count = 0
first_round = True
bpmTolerance = 5
minBpm = 30
maxBpm =560
while recompute:
gen = VectorInput(novelty)
bpmHist = BpmHistogram(frameRate=pool['framerate'],
frameSize=pool['tempo_framesize'],
overlap=int(pool['tempo_overlap']),
maxPeaks=10,
windowType='hann',
minBpm=minBpm,
maxBpm=maxBpm,
normalize=False,
constantTempo=False,
tempoChange=5,
weightByMagnitude=True)
gen.data >> bpmHist.novelty
bpmHist.bpm >> (pool, 'peaksBpm')
bpmHist.bpmMagnitude >> (pool, 'peaksMagnitude')
bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
bpmHist.confidence >> (pool, 'confidence')
bpmHist.ticks >> (pool, 'ticks')
bpmHist.ticksMagnitude >> (pool, 'ticksMagnitude')
bpmHist.sinusoid >> (pool, 'sinusoid')
essentia.run(gen)
print pool['peaksBpm']
bpm = pool['harmonicBpm'][0]
# align ticks with novelty curve
#ticks, _ = alignTicks(pool['sinusoid'], pool['original_novelty_curve'], #novelty,
# pool['framerate'], bpm, pool['length'])
# or don't align ticks?
ticks = pool['ticks']
_, _, bestBpm= getMostStableTickLength(ticks)
print 'estimated bpm:', bpm, 'bestBpm:', bestBpm, 'diff:', fabs(bpm-bestBpm)
if first_round:
pool.set('first_estimated_bpms', pool['peaksBpm'])
first_round = False
recompute = False
if fabs(bestBpm - bpm) < bpmTolerance: recompute = False
else:
count+=1
if count >= 5:
bpmTolerance += 1
count = 0
print "recomputing!!!!"
novelty = copy.deepcopy(pool['sinusoid'])
pool.remove('sinusoid')
pool.remove('novelty_curve')
pool.remove('peaksBpm')
pool.remove('peaksMagnitude')
pool.remove('harmonicBpm')
pool.remove('harmonicBpm')
pool.remove('confidence')
pool.remove('ticks')
pool.remove('ticksMagnitude')
#ticks = essentia.postProcessTicks(ticks)
#ticks, ticksAmp = alignTicks(pool['sinusoid'], pool['original_novelty_curve'],
# pool['framerate'], bpm, pool['length'])
print 'bpms:', pool['peaksBpm']
print 'first estimated bpms:', pool['first_estimated_bpms']
if step>1:
ticks = essentia.array(map(lambda i: ticks[i],
filter(lambda i: i%step == 0,range(len(ticks)))))
pool.remove('ticks')
pool.set('ticks', ticks)
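# longestChain: starting at startpos, count how many consecutive inter-tick
# intervals in dticks stay within +/- tolerance of the given period.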
def longestChain(dticks, startpos, period, tolerance):
pos = startpos
ubound = period*(1+tolerance)
lbound = period*(1-tolerance)
while (pos < len(dticks)) and\
(lbound < dticks[pos] and dticks[pos] < ubound):
pos += 1
return pos - startpos
def alignTicks(sine, novelty, frameRate, bpm, size):
''' Aligns the sine function with the novelty function. Parameters:
@sine: the sinusoid from bpmHistogram,
@novelty: the novelty curve
@frameRate: the frameRate
@size: the audio size, in order not to have more ticks than the audio size
@bpm: the estimated bpm'''
#pyplot.plot(novelty, 'k')
#pyplot.plot(sine, 'r')
#for i in range(len(novelty)-1):
# diff = novelty[i+1]-novelty[i]
# if diff > 0: novelty[i] = diff
# else: novelty[i] = 0
#pyplot.plot(novelty, 'r')
noveltySize = len(novelty)
prodPulse = zeros(noveltySize, dtype='f4')
i = 0
while i < noveltySize:
if sine[i] <= 0:
i += 1
continue
window = []
while i < noveltySize and sine[i] != 0:
window.append(novelty[i]*sine[i])
i+=1
peakPos = argmax(window)
peakPos = i - len(window) + peakPos
prodPulse[peakPos] = novelty[peakPos]
#pyplot.plot(prodPulse, 'g')
#pyplot.show()
ticks = []
ticksAmp = []
tatum = 60./bpm
diffTick = 2*tatum
prevTick = -1
prevAmp = -1
for i, x in enumerate(prodPulse):
if x != 0:
newTick = float(i)/frameRate
if newTick < 0 or newTick >= size:
continue
ticks.append(newTick)
ticksAmp.append(x)
#if x != 0:
# newTick = float(i)/frameRate
# if newTick < 0 or newTick >= size: continue
# if prevTick < 0:
# ticks.append(newTick)
# ticksAmp.append(x)
# prevTick = newTick
# prevAmp = x
# else:
# print 'ok'
# diff = newTick-prevTick
# if (diff >= 0.9*tatum) :
# ticks.append(newTick)
# ticksAmp.append(x)
# prevTick = newTick
# prevAmp = x
# else: #(newTick-prevTick) < 0.75*tatum:
# print 'newTick:', newTick, 'prevTick', prevTick, 'diff:', newTick-prevTick, 'tatum', tatum, 0.9*tatum
# newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
# ticks[-1] = newTick
# ticksAmp[-1] = (x+prevAmp)/2.
# prevTick = newTick
# prevAmp = (x+prevAmp)/2.
return ticks, ticksAmp
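# getMostStableTickLength: histogram the inter-tick intervals, take the most
# common interval as the beat period (and bpm), then find the longest run of
# consecutive ticks whose spacing stays within 10% of that period.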
def getMostStableTickLength(ticks):
nticks = len(ticks)
dticks = zeros(nticks-1)
for i in range(nticks-1):
dticks[i] = (ticks[i+1] - ticks[i])
hist, distx = np.histogram(dticks, bins=50*(1+(max(dticks)-min(dticks))))
bestPeriod = distx[argmax(hist)] # there may be more than one candidate!!
bestBpm = 60./bestPeriod
print 'best period', bestPeriod
print 'best bpm:', bestBpm
#print 'hist:', hist, distx
maxLength = 0
idx = 0
for startpos in range(nticks-1):
l = longestChain(dticks, startpos, bestPeriod, 0.1)
if l > maxLength :
maxLength = l;
idx = startpos;
print 'max stable length:', idx, maxLength
return idx, maxLength, bestBpm
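# postProcessTicks: scan a short window around each tick and move the tick to
# the frame where the RMS energy increases the most (the likely beat onset).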
def postProcessTicks(audioFilename, ticks, ticksAmp, pool):
'''Computes delta energy in order to find the correct position of the ticks'''
# get rid of beats of beats > audio.length
# if t < 0 or t > pool['length']: continue
# ticks.append(float(t))
# ticksAmp.append(float(amp))
#ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
beatWindowDuration = 0.01 # seconds
beatDuration = 0.005 # seconds
rmsFrameSize = 64
rmsHopSize = rmsFrameSize/2
audio = std.MonoLoader(filename=audioFilename,
sampleRate=pool['samplerate'],
downmix=pool['downmix'])()
for i, tick in enumerate(ticks):
startTime = tick - beatWindowDuration/2.0
if startTime < 0: startTime = 0
endTime = startTime + beatWindowDuration + beatDuration + 0.0001
slice = std.Trimmer(sampleRate=pool['samplerate'],
startTime=startTime,
endTime=endTime)(audio)
frames = std.FrameGenerator(slice, frameSize=rmsFrameSize, hopSize=rmsHopSize)
maxDeltaRms=0
RMS = std.RMS()
prevRms = 0
pos = 0
tickPos = pos
for frame in frames:
rms = RMS(frame)
diff = rms - prevRms
if diff > maxDeltaRms:
tickPos = pos
maxDeltaRms = diff
pos+=1
prevRms = rms
ticks[i]= tick + tickPos*float(rmsHopSize)/pool['samplerate']
return ticks
def writeBeatFile(filename, pool) :
beatFilename = os.path.splitext(filename)[0] + '_beat.wav' #'out_beat.wav' #
audio = EasyLoader(filename=filename, downmix='mix', startTime=STARTTIME, endTime=ENDTIME)
writer = MonoWriter(filename=beatFilename)
onsetsMarker = AudioOnsetsMarker(onsets=pool['ticks'])
audio.audio >> onsetsMarker.signal >> writer.audio
essentia.run(audio)
return beatFilename
def computeBeatsLoudness(filename, pool):
loader = MonoLoader(filename=filename,
sampleRate=pool['samplerate'],
downmix=pool['downmix'])
ticks = pool['ticks']#[pool['bestTicksStart']:pool['bestTicksStart']+32]
beatsLoud = BeatsLoudness(sampleRate = pool['samplerate'],
frequencyBands = barkBands, #EqBands, #scheirerBands, #barkBands,
beats=ticks)
loader.audio >> beatsLoud.signal
beatsLoud.loudness >> (pool, 'loudness')
beatsLoud.loudnessBandRatio >> (pool, 'loudnessBandRatio')
essentia.run(loader)
def computeSpectrum(signal):
#gen = VectorInput(signal)
#fc = FrameCutter(startFromZero=False, frameSize=48, hopSize=1)
#w = Windowing(zeroPhase=False)
#spec = Spectrum()
#p = essentia.Pool()
#gen.data >> fc.signal
#fc.frame >> w.frame >> spec.frame
#spec.spectrum >> (p,'spectrum')
#essentia.run(gen)
#pyplot.imshow(p['spectrum'], cmap=pyplot.cm.hot, aspect='auto', origin='lower')
corr = std.AutoCorrelation()(signal)
pyplot.plot(corr)
pyplot.show()
print argmax(corr[2:])+2
def isPowerTwo(n):
return (n&(n-1))==0
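# isEvenHarmonic: two lags are related as even harmonics when one divides the
# other and the ratio is a power of two; getHarmonics averages, for each lag,
# the values found at its even-harmonic lags.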
def isEvenHarmonic(a,b):
if a < 2 or b < 2: return False
if (a<b): return isEvenHarmonic(b,a)
return (a%b == 0) and isPowerTwo(a/b)
def isHarmonic(a,b):
if a < 2 or b < 2: return False
if (a<b): return isHarmonic(b,a)
return (a%b == 0)
def getHarmonics(array):
size = len(array)
hist = [0]*size
counts = [1]*size
for idx1, x in enumerate(array):
for idx2, y in enumerate(array):
if isEvenHarmonic(idx1, idx2):
hist[idx1] += y
counts[idx1] += 1
hist = [hist[i]/float(counts[i]) for i in range(size)]
return hist
def plot(pool, title, outputfile='out.svg', subplot=111):
''' plots bars for each beat'''
#computeSpectrum(pool['loudness'])
ticks = pool['ticks']
#barSize = min([ticks[i+1] - ticks[i] for i in range(len(ticks[:-1]))])/2.
barSize = 0.8
offset = barSize/2.
loudness = pool['loudness']
loudnessBand = pool['loudnessBandRatio'] # ticks x bands
medianRatiosPerTick = []
meanRatiosPerTick = []
for tick, energy in enumerate(loudnessBand):
medianRatiosPerTick.append(median(energy))
meanRatiosPerTick.append(mean(energy))
loudnessBand = copy.deepcopy(loudnessBand.transpose()) # bands x ticks
#xcorr = std.CrossCorrelation(minLag=0, maxLag=16)
#acorr = std.AutoCorrelation()
#bandCorr = []
#for iBand, band in enumerate(loudnessBand):
# bandCorr.append(acorr(essentia.array(band)))
nBands = len(loudnessBand)
nticks = len(loudness)
maxRatiosPerBand = []
medianRatiosPerBand = []
meanRatiosPerBand = []
for idxBand, band in enumerate(loudnessBand):
maxRatiosPerBand.append([0]*nticks)
medianRatiosPerBand.append([0]*nticks)
meanRatiosPerBand.append([0]*nticks)
for idxTick in range(nticks):
start = idxTick
end = start+BEATWINDOW
if (end>nticks):
howmuch = end-nticks
end = nticks-1
start = end-howmuch
if start < 0: start = 0
medianRatiosPerBand[idxBand][idxTick] = median(band[start:end])
maxRatiosPerBand[idxBand][idxTick] = max(band[start:end])
meanRatiosPerBand[idxBand][idxTick] = mean(band[start:end])
for iBand, band in enumerate(loudnessBand):
for tick, ratio in enumerate(band):
#if ratio < medianRatiosPerBand[iBand][tick] and\
# ratio <= medianRatiosPerTick[tick]: loudnessBand[iBand][tick]=0
bandThreshold = max(medianRatiosPerBand[iBand][tick],
meanRatiosPerBand[iBand][tick])
tickThreshold = max(medianRatiosPerTick[tick],
meanRatiosPerTick[tick])
if ratio < bandThreshold and ratio <= tickThreshold:
loudnessBand[iBand][tick]=0
else:
loudnessBand[iBand][tick] *= loudness[tick]
#if loudnessBand[iBand][tick] > 1 : loudnessBand[iBand][tick] = 1
acorr = std.AutoCorrelation()
bandCorr = []
maxCorr = []
for iBand, band in enumerate(loudnessBand):
bandCorr.append(acorr(essentia.array(band)))
maxCorr.append(argmax(bandCorr[-1][2:])+2)
# use as much window space as possible:
pyplot.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
pyplot.subplot(511)
pyplot.imshow(bandCorr, cmap=pyplot.cm.hot, aspect='auto', origin='lower', interpolation='nearest')
print 'max correlation', maxCorr
sumCorr = []
for tick in range(nticks):
total = 0
for band in bandCorr:
total += band[tick]
sumCorr.append(total)
sumCorr[0] = 0
sumCorr[1] = 0
pyplot.subplot(512)
maxAlpha = max(sumCorr)
for i,val in enumerate(sumCorr):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, 1 , barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
print 'max sum correlation', argmax(sumCorr[2:])+2
hist = getHarmonics(sumCorr)
maxHist = argmax(hist)
print 'max histogram', maxHist
#for idx,val in enumerate(hist):
# if val < maxHist: hist[idx] = 0
pyplot.subplot(513)
for i,val in enumerate(hist):
pyplot.bar(i, val , barSize, align='edge',
bottom=0, color='r', edgecolor='w', linewidth=.3)
peakDetect = std.PeakDetection(maxPeaks=5,
orderBy='amplitude',
minPosition=0,
maxPosition=len(sumCorr)-1,
range=len(sumCorr)-1)
peaks = peakDetect(sumCorr)[0]
peaks = [round(x+1e-15) for x in peaks]
print 'Peaks:',peaks
pyplot.subplot(514)
maxAlpha = max(sumCorr)
for i,val in enumerate(sumCorr):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, val, barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
# multiply both histogram and sum corr to have a weighted histogram:
wHist = essentia.array(hist)*sumCorr*acorr(loudness)
maxHist = argmax(wHist)
print 'max weighted histogram', maxHist
pyplot.subplot(515)
maxAlpha = max(wHist)
for i,val in enumerate(wHist):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, val, barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
pyplot.savefig(outputfile, dpi=300)
#pyplot.show()
return
def ossplay(filename): # play audio thru oss
from wave import open as waveOpen
from ossaudiodev import open as ossOpen
s = waveOpen(filename,'rb')
(nc,sw,fr,nf,comptype, compname) = s.getparams( )
dsp = ossOpen('/dev/dsp','w')
try:
from ossaudiodev import AFMT_S16_NE
except ImportError:
if byteorder == "little":
AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
else:
AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
dsp.setparameters(AFMT_S16_NE, nc, fr)
data = s.readframes(nf)
s.close()
dsp.write(data)
dsp.close()
def getkey(audioFilename, device, f, card, lock):
c = None
b = True
while b:
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd)
#new = termios.tcgetattr(fd)
#new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO
#new[6][TERMIOS.VMIN] = 1
#new[6][TERMIOS.VTIME] = 0
#termios.tcsetattr(fd, TERMIOS.TCSANOW, new)
#c = None
lock.acquire()
#try:
# c = os.read(fd, 1)
#finally:
# termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)
#if c == '\n': ## break on a Return/Enter keypress
# b = False
# return
#if c==' ': playAudio(audioFilename)
#else: print 'got', c
#ossplay(audioFilename)
alsaplay(audioFilename, device, f, card)
lock.release()
time.sleep(0.1)
def alsaplay(filename, device, f, card):
device.setchannels(f.getnchannels())
device.setrate(f.getframerate())
# 8bit is unsigned in wav files
if f.getsampwidth() == 1:
device.setformat(alsaaudio.PCM_FORMAT_U8)
# Otherwise we assume signed data, little endian
elif f.getsampwidth() == 2:
device.setformat(alsaaudio.PCM_FORMAT_S16_LE)
elif f.getsampwidth() == 3:
device.setformat(alsaaudio.PCM_FORMAT_S24_LE)
elif f.getsampwidth() == 4:
device.setformat(alsaaudio.PCM_FORMAT_S32_LE)
else:
raise ValueError('Unsupported format')
device.setperiodsize(320)
data = f.readframes(320)
while data:
device.write(data)
data = f.readframes(320)
f.setpos(0)
if __name__ == '__main__':
if len(sys.argv) < 1:
usage()
sys.exit(1)
step = 1
if len(sys.argv) > 2:
step = int(sys.argv[-1])
inputfilename = sys.argv[1]
ext = os.path.splitext(inputfilename)[1]
if ext == '.txt': # input file contains a list of audio files
files = open(inputfilename).read().split('\n')[:-1]
else: files = [inputfilename]
for audiofile in files:
print "*"*70
print "Processing ", audiofile
print "*"*70
try:
bpmfile = audiofile.replace('wav', 'bpm')
print "bpmfile:", bpmfile
print 'realBpm', open(bpmfile).read()
except:
print 'realBpm not found'
pool = essentia.Pool()
pool.set('downmix', DOWNMIX)
pool.set('framesize', FRAMESIZE)
pool.set('hopsize', HOPSIZE)
pool.set('weight', WEIGHT)
pool.set('samplerate', SAMPLERATE)
pool.set('window', WINDOW)
pool.set('framerate', FRAMERATE)
pool.set('tempo_framesize', TEMPO_FRAMESIZE)
pool.set('tempo_overlap', TEMPO_OVERLAP)
pool.set('step', step)
#computeSegmentation(audiofile, pool)
#segments = pool['segments']
computeBeats(audiofile, pool)
beatFilename = writeBeatFile(audiofile, pool)
computeBeatsLoudness(audiofile, pool)
imgfilename = os.path.splitext(audiofile)[0]+'.png'
#imgfilename = imgfilename.split(os.sep)[-1]
#print 'plotting', imgfilename
if sys.platform == 'darwin' or sys.platform == 'win32':
plot(pool,'beats loudness ' + str(audiofile), imgfilename);
else:
# card = 'default'
# f = wave.open(beatFilename, 'rb')
## print '%d channels, sampling rate: %d \n' % (f.getnchannels(),
## f.getframerate())
# device = alsaaudio.PCM(card=card)
# lock = thread.allocate_lock()
# thread.start_new_thread(getkey, (beatFilename, device, f, card, lock))
plot(pool,'beats loudness ' + audiofile, imgfilename);
# f.close()
# thread.exit()
#print 'deleting beatfile:', beatFilename
#subprocess.call(['rm', beatFilename])
| agpl-3.0 |
demis001/biopandas | bioframes/sequenceframes.py | 2 | 3520 | from __future__ import print_function
from operator import attrgetter as attr
from operator import itemgetter
from Bio import SeqIO
from func import compose, compose_all, imap, apply_each, _id, get_funcs
from bioframes import error, get_fastq, check_np_type
import pandas as pd
import numpy as np
from schema import Schema
from collections import namedtuple
from functools import partial
''' All the boring stuff to interact with SeqRecord API '''
#TODO: separate out the business logic
#TODO: fix for 32-bit systems
#return pd.DataFrame(final_dict, index=index)
#get_fastq_row = compose(get_row, get_fastq)
class BioFrame(pd.DataFrame):
def find_duplicates(frame):
''' not sure why this one works. '''
''' for example, find pcr duplicates by finding duplicate reads'''
#df2[df2['b'] == df2['b'] & (df2['a'] == df2['a'])]
''' duplicated by default returns all but the first occurrence; take_last takes only the first occurrence,
so unioning them (via | (or)), returns all duplicates. accepts any number of keys. '''
frame[frame.duplicated(['b'],take_last=True) | frame.duplicated(['b'])]
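# e.g. with frame['b'] = ['x', 'y', 'x']: duplicated(['b']) flags the
# second 'x', duplicated(['b'], take_last=True) flags the first, and
# their union flags both copies.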
#class FastqFrame(BioFrame):
#TODO:
'''
create another function which acts on this closure, extracts the getters,
and applies them to the object. maybe the getters can have the same name as the columns for simplicity.
here we have get_error which could be the same in samframe, may be able to avoid redefining it.def
'''
def fqframe(fileh):
final_schema = Schema({
'id' : str,
'seq' : str,
'quality' : str,
'qual_ints' : check_np_type('int64'),
'error' : check_np_type('float64'),
'description' : str
})
#get_object = _id
index = ['id']
columns = ('id', 'seq', 'quality', 'description', 'qual_ints', 'error')
SANGER = True
get_id = attr('id')
get_seq= compose(str, attr('seq'))
get_qual_ints = compose_all(np.array, itemgetter('phred_quality'), attr('_per_letter_annotations'))
get_description = attr('description')
get_quality = SeqIO.QualityIO._get_sanger_quality_str
get_error = compose(error, get_qual_ints)
#get_error = error_from_ints(get_qual_ints)
getters = [get_id, get_seq, get_quality, get_description, get_qual_ints, get_error]
assert len(getters) == len(columns)
metadata = {'filename' : fileh.name}
iterator = get_fastq(fileh)
get_raw_record = partial(next, iterator)
# def get_row(record):
# #record = next(fileh)
## import sys
## __module__ = sys.modules[__name__]
## get_getter = compose(attr, "get_{0}".format)
## _getters = map(get_getter, columns)
## self_getters = apply_each(_getters, __module__) #fzip(_getters, repeat(__module__, clen))
# results = apply_each(self_getters, record)
# final_dict = dict(zip(columns, results))
# final_schema.validate(final_dict)
# return final_dict
# def load_fastq():
# fq = get_fastq(fileh)
# dicts = map(get_row, fq)
# return pd.DataFrame(dicts).set_index(index) #, index=index, columns=columns)
#jreturn nameddict(
return { 'obj_func' : get_raw_record,
'columns' : columns,
'getters' : getters,
'validator' : final_schema,
'dictgetters' : None
}
#return namedtuple('FastqFrame', ['obj_func', ])(get_row, load_fastq)#{'get_row' : get_row, 'load_fastq' : load_fastq}
#FastqFrame = namedtuple('FastqFrame', 'get_row', 'load_fastq')
| gpl-2.0 |