repo_name | path | copies | size | content | license
---|---|---|---|---|---
pratapvardhan/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained on a subset of 330 points, of which only 30 will be
labeled. The results, in the form of a confusion matrix and a series
of metrics over each class, will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask all points outside the labeled subset as unlabeled (-1)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
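# Optional check (not part of the original example): overall accuracy of the
# transduced labels on the 300 unlabeled points, using only numpy.
print("Accuracy on unlabeled points: %0.3f"
      % np.mean(predicted_labels == true_labels))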
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
mganeva/mantid | scripts/DiamondAttenuationCorrection/FitTrans.py | 1 | 47894 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue-Langevin
# SPDX-License-Identifier: GPL-3.0+
'''
1. all the functions are defined and built consistently.
Data types:
- Use only numpy arrays to ensure consistency across formatting and type
*** x, x0 = parameters vector - 1D numpy array
*** setang1, setang2 = angles for refinement - 1D numpy arrays, 3 elements
*** hkl1, hkl2 = numpy arrays (3 columns) having all the hkl indices from the 2 diamonds
*** UB1, UB2 = numpy arrays (3x3) holds UB matrices from input files
***
'''
# Import all needed libraries
from __future__ import (absolute_import, division, print_function)
from matplotlib import pyplot as plt
import numpy as np
import itertools as itt
import UBMatrixGenerator as UBMG
import scipy.optimize as sp
__author__ = 'cip'
# Define global variables
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global figure_name_attenuation, run_number
def dlmread(filename):
'''
Function to read parameters from file after previous fit
'''
content = []
with open(filename, "r") as f:
for line in f.readlines():
content.append(float(line))
return np.array(content)
def calcDspacing(a, b, c, alp, bet, gam, h, k, l):
'''
%CALCDSPACING for general unit cell: a,b,c,alp,bet,gam returns d-spacing for
%reflection h,k,l
%
'''
ca = np.cos(np.radians(alp))
cb = np.cos(np.radians(bet))
cg = np.cos(np.radians(gam))
sa = np.sin(np.radians(alp))
sb = np.sin(np.radians(bet))
sg = np.sin(np.radians(gam))
oneoverdsq = (1.0 - ca ** 2 - cb ** 2 - cg ** 2 + 2 * ca * cb * cg) ** (-1) * \
((h * sa / a) ** 2 + (k * sb / b) ** 2 + (l * sg / c) ** 2
+ (2 * k * l / (b * c)) * (cb * cg - ca) + (2 * l * h / (c * a)) * (cg * ca - cb)
+ (2 * h * k / (a * b)) * (ca * cb - cg))
d = np.sqrt(1.0 / oneoverdsq)
return d
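# Illustrative check (not part of the original script): for cubic diamond with
# a = b = c = 3.56683 A and all angles 90 deg, the general expression above
# reduces to d = a / sqrt(h^2 + k^2 + l^2), so
# calcDspacing(3.56683, 3.56683, 3.56683, 90, 90, 90, 1, 1, 1) is ~2.06 A,
# consistent with the (111) entry of the reflection table used in pkintread().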
def genhkl(hmin, hmax, kmin, kmax, lmin, lmax):
'''
    genhkl generates an array of hkl values
    the total number of rows will be (hmax-hmin+1)*(kmax-kmin+1)*(lmax-lmin+1)
'''
hvals = np.arange(hmin, hmax + 1, 1)
kvals = np.arange(kmin, kmax + 1, 1)
lvals = np.arange(lmin, lmax + 1, 1)
nh = len(hvals)
nk = len(kvals)
nl = len(lvals)
l = 0
hkl = np.zeros(shape=(nh * nl * nk, 3))
for i in range(nh):
for j in range(nk):
for k in range(nl):
hkl[l][0] = hvals[i]
hkl[l][1] = kvals[j]
hkl[l][2] = lvals[k]
l += 1
return hkl
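# Example (illustrative): genhkl(0, 1, 0, 1, 0, 1) returns an 8x3 array, i.e.
# (1+1)*(1+1)*(1+1) rows running from (0, 0, 0) to (1, 1, 1) in nested
# h, k, l order.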
def mod(a, b):
return a % b
def forbidden(h, k, l):
'''
    %returns True if this hkl is forbidden according to the
    % diamond reflection conditions....
'''
ah = abs(h)
ak = abs(k)
al = abs(l)
if ((h == 0) and (k == 0) and (l == 0)):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
if ((ah == 2) and (ak == 2) and (al == 2)): # allowed, but vanishingly weak
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 1
if ((h != 0) and (k != 0) and (l != 0)): # general hkl
term1 = h + k
term2 = h + l # all have to be even
term3 = k + l
if not ((term1 % 2) == 0 and (term2 % 2) == 0 and (term3 % 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# % condition 2
if ((h == 0) and (k != 0) and (l != 0)): # 0kl reflections
term1 = k + l
mod4 = mod(term1, 4)
if not (mod4 == 0 and mod(k, 2) == 0 and mod(l, 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 3
if (h == k): # hhl reflections
if not (mod(h + l, 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 4
if ((h == 0) and (k == 0) and (l != 0)): # 00l reflections not including 000
mod4 = mod(l, 4)
if not (mod4 == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
boolresult = bool(result)
return boolresult
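# Illustrative note (not part of the original script): each condition above only
# tests one index ordering, which is why allowedDiamRefs() below also checks the
# cyclic permutations. For example forbidden(2, 0, 0) returns False, but its
# permutation forbidden(0, 0, 2) is caught by the 00l rule (l not divisible by
# 4), so (2 0 0) is purged, while (1 1 1) and (2 2 0) survive, as expected for
# the diamond structure.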
def allowedDiamRefs(hmin, hmax, kmin, kmax, lmin, lmax):
'''
    %allowedDiamRefs generates a list of allowed reflections for diamond between
    % the limits provided, sorted in descending order of d-spacing
'''
# obtain all hkl within limits...
allhkl = genhkl(hmin, hmax, kmin, kmax, lmin, lmax)
# now purge those violating extinction conditions...
n = len(allhkl)
# set all forbidden hkl's to zero
# hkl or lhk or klh
for i in range(n):
h = allhkl[i][0]
k = allhkl[i][1]
l = allhkl[i][2]
if forbidden(h, k, l) or forbidden(l, h, k) or forbidden(k, l, h):
allhkl[i] = 0 # set equal to zero
k = 0
d = [] # np.zeros(0)
# create new array with all h!=0 k!=0 l!=0
hkl = np.zeros(shape=(0, 3))
for i in range(n):
if not (allhkl[i][0] == 0 and allhkl[i][1] == 0 and allhkl[i][2] == 0):
hkl = np.vstack((hkl, [allhkl[i][0], allhkl[i][1], allhkl[i][2]]))
d.append(calcDspacing(3.56683, 3.56683, 3.56683, 90,
90, 90, hkl[k][0], hkl[k][1], hkl[k][2]))
k += 1
d = np.array(d)
# ORDER hkl according to d-spacing
B = sorted(d)[::-1] # returns d sorted in descending order
IX = np.argsort(d)[::-1] # and corresponding elements
sorthkl = np.zeros(shape=(k, 3))
for i in range(k):
sorthkl[i] = hkl[IX[i]]
d[i] = B[i]
# print('hkl: {0:0.3f} {1:0.3f} {2:0.3f} d-spacing: {3:0.3f} A'.format(sorthkl[i][0], sorthkl[i][1],
# sorthkl[i][2], d[i]))
return sorthkl
def getISAWub(fullfilename):
'''
%getISAWub reads UB determined by ISAW and stored in file "fname"
% Detailed explanation goes here
% [filename pathname ~] = ...
% uigetfile('*.dat','Choose UB file (generated by ISAW)');
% fullfilename = [pathname filename];
'''
    # open() itself raises IOError if the file cannot be read
    f = open(fullfilename, "r")
lines = f.readlines()
f.close()
# Build UB matrix and lattice
UB = np.zeros(shape=(3, 3))
lattice = np.zeros(shape=(2, 6))
for i in range(3):
UB[i][0], UB[i][1], UB[i][2] = lines[i].split()
UB = UB.transpose()
for i in range(3, 5):
lattice[i - 3][0], lattice[i - 3][1], \
lattice[i - 3][2], lattice[i - 3][3], \
lattice[i - 3][4], lattice[i - 3][5], \
non = lines[i].split()
print('Successfully got UB and lattice')
return UB, lattice
def pkintread(hkl, loc):
'''
    %reads calculated Fcalc and converts to
    %Fobs using the Buras-Gerward equation.
    %inputs are hkl(nref,3) and
    % loc(nref,3), which contains lambda, d-spacing and ttheta for
    % each of the nref reflections.
    % Fcalcs for diamond were generated by GSAS (using lattice parameter 3.5668
    % and Uiso(C) = 0.0038)
    % disp('in pkintread');
    returns pkint = np.array - 1D vector
'''
# A = np.genfromtxt('diamond_reflist.csv', delimiter=',', skip_header=True)
# print A
A = np.array([[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 8.00000000e+00,
2.06110000e+00, 5.54000000e+04],
[2.00000000e+00, 2.00000000e+00, 0.00000000e+00, 1.20000000e+01,
1.26220000e+00, 7.52000000e+04],
[3.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
1.07640000e+00, 2.98000000e+04],
[2.00000000e+00, 2.00000000e+00, 2.00000000e+00, 8.00000000e+00,
1.03060000e+00, 2.50000000e-25],
[4.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00,
8.92500000e-01, 4.05000000e+04],
[3.00000000e+00, 3.00000000e+00, 1.00000000e+00, 2.40000000e+01,
8.19000000e-01, 1.61000000e+04],
[4.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
7.28700000e-01, 2.18000000e+04],
[5.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
6.87000000e-01, 8.64000000e+03],
[3.00000000e+00, 3.00000000e+00, 3.00000000e+00, 8.00000000e+00,
6.87000000e-01, 8.64000000e+03],
[4.00000000e+00, 4.00000000e+00, 0.00000000e+00, 1.20000000e+01,
6.31100000e-01, 1.17000000e+04],
[5.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
6.03400000e-01, 4.65000000e+03],
[4.00000000e+00, 4.00000000e+00, 2.00000000e+00, 2.40000000e+01,
5.95000000e-01, 1.83000000e-12],
[6.00000000e+00, 2.00000000e+00, 0.00000000e+00, 2.40000000e+01,
5.64500000e-01, 6.31000000e+03],
[5.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
5.44400000e-01, 2.50000000e+03],
[6.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
5.38200000e-01, 8.80000000e-26],
[4.00000000e+00, 4.00000000e+00, 4.00000000e+00, 8.00000000e+00,
5.15300000e-01, 3.40000000e+03],
[5.00000000e+00, 5.00000000e+00, 1.00000000e+00, 2.40000000e+01,
4.99900000e-01, 1.35000000e+03],
[7.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
4.99900000e-01, 1.35000000e+03],
[6.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01,
4.77100000e-01, 1.83000000e+03],
[7.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
4.64800000e-01, 7.25000000e+02],
[5.00000000e+00, 5.00000000e+00, 3.00000000e+00, 2.40000000e+01,
4.64800000e-01, 7.25000000e+02],
[8.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00,
4.46200000e-01, 9.84000000e+02],
[7.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
4.36100000e-01, 3.90000000e+02],
[6.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01,
4.32900000e-01, 1.53000000e-13],
[6.00000000e+00, 6.00000000e+00, 0.00000000e+00, 1.20000000e+01,
4.20700000e-01, 5.30000000e+02],
[8.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
4.20700000e-01, 5.30000000e+02],
[5.00000000e+00, 5.00000000e+00, 5.00000000e+00, 8.00000000e+00,
4.12200000e-01, 2.10000000e+02],
[7.00000000e+00, 5.00000000e+00, 1.00000000e+00, 4.80000000e+01,
4.12200000e-01, 2.10000000e+02],
[6.00000000e+00, 6.00000000e+00, 2.00000000e+00, 2.40000000e+01,
4.09500000e-01, 1.98000000e-26],
[8.00000000e+00, 4.00000000e+00, 0.00000000e+00, 2.40000000e+01,
3.99100000e-01, 2.85000000e+02],
[7.00000000e+00, 5.00000000e+00, 3.00000000e+00, 4.80000000e+01,
3.91900000e-01, 1.13000000e+02],
[9.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
3.91900000e-01, 1.13000000e+02],
[8.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01,
3.89500000e-01, 4.44000000e-14],
[6.00000000e+00, 6.00000000e+00, 4.00000000e+00, 2.40000000e+01,
3.80600000e-01, 1.53000000e+02],
[9.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
3.74200000e-01, 6.08000000e+01],
[8.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01,
3.64400000e-01, 8.26000000e+01],
[9.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01],
[7.00000000e+00, 5.00000000e+00, 5.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01],
[7.00000000e+00, 7.00000000e+00, 1.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01]])
diamd = A[:, 4]
# diamMult = A[:, 3] # unused variable
diamFCalcSq = A[:, 5]
nref = hkl.shape[0]
# % disp(['there are: ' num2str(nref) ' reflections']);
# % whos loc
'''
% [i,j] = size(x);
% dipspec = zeros(i,j); %array containing dip spectrum
% difspec = zeros(i,j); %array containing diffraction spectrum
% d = x/sqrt(2); %dspacings for this lamda range at 90 degrees
% In order to calculate the scattered intensity I from the Fcalc^2, need to
% apply the Buras-Gerward formula:
%
% Fcalc^2 = I*2*sin(theta)^2/(lamda^2*A*E*incFlux*detEffic)
'''
pkint = np.zeros(nref)
for i in range(nref):
if loc[i][0] > 0:
# % satisfies Bragg condition (otherwise ignore)
Fsq = Fsqcalc(loc[i][1], diamd, diamFCalcSq)
# % Fsq = 1;
L = (np.sin(np.radians(loc[i][2] / 2.0))) ** 2 # Lorentz correction
R = 1.0 # %dipLam(i)^4; %reflectivity correction
A = 1.0 # %Absorption correction
Ecor = 1
pkint[i] = Fsq * R * A / (L * Ecor) # %scattered intensity
'''
% whos difspec
% whos van
% whos dipspec
% difspec = difspec.*van;
% dipspec = dipspec.*van;
% figure(1)
% plot(d,difspec)
'''
return pkint
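# Worked example (illustrative, not from the original script): for a dip
# observed at ttheta = 70.53 deg (the 111 geometry, where sin^2(ttheta/2) = 1/3)
# with R = A = Ecor = 1 and Fsq = 5.54e4, the Lorentz term gives
# pkint = Fsq / (1/3) ~= 1.66e5, before the 1e-6 rescaling applied to
# pkcalcint1/pkcalcint2 in FitTrans().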
def Fsqcalc(d, diamd, diamFCalcSq):
'''
% diamond reflections are identified according to their d-spacing
% and corresponding calculated Fsq are returned
% global sf111 sf220 sf311 sf400 sf331
'''
# n = len(diamd) # unused variable
ref = d
dif = abs(diamd - ref)
i = dif.argmin(0) # i is index of diamd closest to d
Fsq = diamFCalcSq[i]
return Fsq
def pkposcalc(hkl, UB, setang):
'''
% calculates some useful numbers from (ISAW calculated) UB
% hkl is a 2D array containing all hkl's
%
'''
ome = setang[0]
phi = setang[1]
chi = setang[2]
thkl = hkl.transpose()
Q = UB.dot(thkl)
Rx = np.array([[1, 0, 0], [0, np.cos(np.radians(ome)), -np.sin(np.radians(ome))],
[0, np.sin(np.radians(ome)), np.cos(np.radians(ome))]])
Ry = np.array([[np.cos(np.radians(phi)), 0, np.sin(np.radians(phi))], [0, 1, 0],
[-np.sin(np.radians(phi)), 0, np.cos(np.radians(phi))]])
Rz = np.array([[np.cos(np.radians(chi)), -np.sin(np.radians(chi)), 0],
[np.sin(np.radians(chi)), np.cos(np.radians(chi)), 0], [0, 0, 1]])
Rall = Rz.dot(Ry).dot(Rx) # all three rotations
# str = sprintf('initial: %6.4f %6.4f %6.4f',Q);
# disp(str)
Q = Rall.dot(Q)
magQ = np.sqrt((Q * Q).sum(axis=0))
'''
# str = sprintf('Scattering vector: %6.4f %6.4f %6.4f',Q);
# if show==1
# disp(str)
# end
% %calculate angle with incident beam i.e. (-1 0 0)
% beam = [1 0 0];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to beam: %6.4f',alpha);
% if show==1
% disp(str)
% end
% beam = [0 1 0];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to y: %6.4f',alpha);
% if show==1
% disp(str)
% end
% beam = [0 0 1];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to z: %6.4f',alpha);
% if show==1
% disp(str)
% end
% Q is a vector pointing to the reciprocal lattice point corresponding to
% vector hkl. The coordinate system is in frame I that is right handed with x pointing along
% the beam direction and z vertical.
'''
d = (1.0 / magQ) # by definition (note ISAW doesn't use 2pi factor)
d = d.transpose()
'''
% In frame I the incident beam vector will be of the form [k 0 0]
% where k = 1/lambda
% by considering the scattering relation that Q=k_f-k_i, can show that the dot product of
% -k_i.Q gives the scattering angle 2theta, thus:
'''
ttheta = 180 - 2 * np.degrees(np.arccos(-Q[0, :] / magQ))
ttheta = ttheta.transpose()
# and Bragg's law gives:
lambda_1 = 2 * d * np.sin(np.radians(ttheta / 2))
lambda_1 = lambda_1.transpose()
'''
%
% str = sprintf('for hkl: %3i%3i%3i',hkl(1),hkl(2),hkl(3));
% disp(str)
% str = sprintf('d-spacing is: %6.4f',d);
% disp(str)
% str = sprintf('ttheta is: %6.4f',ttheta);
% disp(str)
% str = sprintf('lambda is: %6.4f',lambda);
% disp(str)
'''
return lambda_1, d, ttheta
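# Example (illustrative, assuming an ideally aligned cubic crystal where
# UB is approximately (1/a) * identity and setang = [0, 0, 0]): then
# magQ = sqrt(h^2 + k^2 + l^2) / a, so d = a / sqrt(h^2 + k^2 + l^2)
# (~2.06 A for diamond (111) with a = 3.56683 A); ttheta and lambda_1
# additionally depend on the direction of Q relative to the beam (the x axis
# of frame I) through the two relations above.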
def getMANTIDdat_keepbinning(csvfile):
'''
getMANTIDdat reads data from mantid "SaveAscii" output
% input file name should be 'csvfilename'.csv
% data are returned with binning (xmin:xbin:xmax)
returns TOF, y , e
'''
fid = open(csvfile, "r")
lines = fid.readlines()
x = []
y = []
e = []
    # open() itself raises IOError if the file cannot be read
for i in range(1, len(lines)):
a, b, c = lines[i].split(",")
x.append(float(a))
y.append(float(b))
e.append(float(c))
fid.close()
x = np.array(x)
y = np.array(y)
e = np.array(e)
return x, y, e
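# Expected input layout (illustrative): a Mantid SaveAscii CSV with a single
# header line followed by "TOF,counts,error" rows, e.g.
#   X,Y,E
#   1000.0,0.98,0.01
# The header line is skipped because the loop above starts at index 1.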
def findeqvs(hkl):
'''
FINDEQVS runs through array of hkls and labels those that are equivalent
%in the m-3m point group.
%
% there are n reflections.
% hkl has dimensions nx3
% eqvlab has dimensions nx1
'''
n, m = hkl.shape
eqvlab = np.zeros(n)
lab = 1
for i in range(n):
if eqvlab[i] == 0: # then it's not been checked yet, so check it
eqvlab[i] = lab
refhkl = np.array([abs(hkl[i][0]), abs(hkl[i][1]), abs(hkl[i][2])])
for j in range(i + 1, n): # check remaining indices
comphkl = np.array(
[abs(hkl[j][0]), abs(hkl[j][1]), abs(hkl[j][2])])
# all possible permutations
permcomphkl = list(itt.permutations(comphkl))
nperm = len(permcomphkl)
for k in range(nperm):
if refhkl[0] == permcomphkl[k][0] and refhkl[1] == permcomphkl[k][1] and \
refhkl[2] == permcomphkl[k][2]:
eqvlab[j] = lab
lab += 1
return eqvlab, lab
def showx3(x):
'''
%showx displays all parameters for refinement in reasonably intelligible
%form
Input : parameter vector and the sets of hkl indices for the diamonds
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
# nref1 = hkl1.shape[0] # % number of reflections to integrate over # unused variable
# nref2 = hkl2.shape[0] # % number of reflections to integrate over # unused variable
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
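    # Layout of the parameter vector x (as assembled in FitTrans()):
    #   x[0:3]                            setang1 (3 setting angles, diamond 1)
    #   x[3:3+neqv1]                      pkmult1 (neqv1 dip multipliers, diamond 1)
    #   x[3+neqv1:6+neqv1]                setang2 (3 setting angles, diamond 2)
    #   x[6+neqv1:6+neqv1+neqv2]          pkmult2 (neqv2 dip multipliers, diamond 2)
    #   x[neqv1+neqv2+6]                  sf (overall scale factor)
    #   x[neqv1+neqv2+7]                  pkwid1 (linear peak-width term)
    #   x[neqv1+neqv2+8:neqv1+neqv2+10]   bgd (2 background coefficients)
    #   x[neqv1+neqv2+10]                 pkwid2 (quadratic peak-width term)
    #   x[neqv1+neqv2+11]                 relsf (relative scale of diamond 2 dips)
    #   x[neqv1+neqv2+12]                 delam (wavelength multiplier)
    #   x[neqv1+neqv2+13]                 L2 (sample-to-detector distance, m)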
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 8 - 1]
# bgd = x[neqv1 + neqv2 + 8 - 1:neqv1 + neqv2 + 9 + 2 - 1] # unused variable
pkwid2 = x[neqv1 + neqv2 + 10]
    # % if diamond intensities are the same, allow a single scale factor
relsf = x[neqv1 + neqv2 + 11]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n')
print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format(
1, setang1[0], setang1[1], setang1[2])))
print(('pkmult1: {0}\n'.format(pkmult1)))
print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format(
2, setang2[0], setang2[1], setang2[2])))
print(('pkmult2: {0}\n'.format(pkmult2)))
print(('Scale factor: {0}\n'.format(sf)))
print(('pkwid1: {0}\n'.format(pkwid1)))
print(('pkwid2: {0}\n'.format(pkwid2)))
print(('Rel. scale factor : {0}\n'.format(relsf)))
print(('Lambda multiplier: {0}\n'.format(delam)))
print(('L2 sample to detector: {0} m\n'.format(L2)))
print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n')
def SimTransOutput3(name, x):
'''
%SimTrans calculates transmission spectrum from two crystals
% lam - array containing wavelengths to calc over
% hkl - contains all Nref hkl's that calculation is performed for
% bgd - array containing coefficients of polynomial for background
% sf - overall scale factor
% pktype - 1 = gauss; 2 = lorentz; ...
% UB1 - UB matrix for first crystal
% setang1 - setting angles for first crystal (deviations from ideal UB
% location).
% pkpars1 - position(lambda), position(d-spacing), position(ttheta), width, intensity for each Nref reflections
% UB2,setang2,pkpars2 - as above, for second crystal
%
% M. Guthrie 21st Jan 2014
%
% calculate background profile
% determine number of coeffs bgd
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global figure_name_attenuation, run_number
nref1 = hkl1.shape[0] # % number of reflections to integrate over
nref2 = hkl2.shape[0] # % number of reflections to integrate over
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 7]
bgd = x[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1]
pkwid2 = x[neqv1 + neqv2 + 10]
    # % if diamond intensities are the same, allow a single scale factor
relsf = x[neqv1 + neqv2 + 11]
if fxsamediam == 1:
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * relsf
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
else:
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
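    # The line below converts time-of-flight to wavelength via the de Broglie
    # relation, lambda [Angstrom] ~= 0.003956 * TOF / (L1 + L2), assuming TOF is
    # in microseconds and the flight-path lengths are in metres; difa adds a
    # small empirical quadratic correction.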
shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
# number of lambda points to calculate over
npt = shftlam.shape[0]
# calculate information for peaks for crystal 1 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# calculate information for peaks for crystal 2 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# generate nptx,nco array containing, x^0,x^1,x^2,...x^nco for
# all nonzero background coefficients
bgdco = np.where(bgd != 0)[0]
nco = bgdco.shape[0]
nonzerobgd = np.zeros(nco)
X = np.ones(shape=(nco, npt))
for i in range(nco):
X[i, :] = shftlam ** (bgd[bgdco[i]] - 1)
nonzerobgd[i] = bgd[bgdco[i]]
# calculate background profile by multiplying this with coefficients
# themselves
bgdprof = nonzerobgd.dot(X)
# bgdprof = np.outer(nonzerobgd, X)
# print bgdprof
# bgdprof = bgdprof[0, :]
# calculate peaks for crystal 1
t1 = np.zeros(npt) # initialise array containing profile
for i in range(nref1):
if pktype == 1:
pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars1[i][0] + pkwid2 * (pkpars1[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect
t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * (
np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2))))
# calculate peaks for crystal 2
t2 = np.zeros(npt) # initialise array containing profile
for i in range(nref2):
if pktype == 1:
pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars2[i][0] + pkwid2 * (pkpars2[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect
t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * (
np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2))))
# calculate final profile
ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2)
# t1 = 1.0;
# t2 = 1.0;
# introduce weighting function and calc chi2...
w = np.ones(len(shftlam)) # equal weighting everywhere
# i1 = np.where(shftlam > 2.15)[0][0]
# j1 = np.where(shftlam > 2.65)[0][0]
# w[i1:j1] = 5 #extra weighting in region of first peaks
# i1 = find(lam>1.68,1,'first');
# j1 = find(lam>2.05,1,'first');
# w(i1:j1)=5; %extra weighting but not too much
resid = (y - ttot) * w
chi2 = np.sum(resid ** 2. / (2 * e ** 2)) / npt
output = 1
if output == 1:
diam1trans = sf * t1 + bgdprof
diam2trans = sf * t2 + bgdprof
out = np.column_stack((shftlam, diam1trans, diam2trans, ttot, y))
np.savetxt(name, out, delimiter=',')
figure_name_attenuation = 'Attenuation ' + run_number
plt.figure(figure_name_attenuation)
plt.plot(shftlam, ttot, 'r', label='Total att.')
plt.plot(shftlam, diam1trans, 'k', label='Diam 1 att.')
plt.plot(shftlam, diam2trans, 'b', label='Diam 2 att.')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.grid()
for i in range(len(pkpars1)):
plt.arrow(pkpars1[i, 0] * delam, 1.1, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
for i in range(len(pkpars2)):
plt.arrow(pkpars2[i, 0] * delam, 1.15, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
plt.xlim(1, 2.7)
plt.ylim(0, 1.2)
plt.show()
return chi2
def SimTrans3(x):
'''
%SimTrans calculates transmission spectrum from two crystals
% lam - array containing wavelengths to calc over
% hkl - contains all Nref hkl's that calculation is performed for
% bgd - array containing coefficients of polynomial for background
% sf - overall scale factor
% pktype - 1 = gauss; 2 = lorentz; ...
% UB1 - UB matrix for first crystal
% setang1 - setting angles for first crystal (deviations from ideal UB
% location).
% pkpars1 - position(lambda), position(d-spacing), position(ttheta), width, intensity for each Nref reflections
% UB2,setang2,pkpars2 - as above, for second crystal
%
% M. Guthrie 21st Jan 2014
%
% calculate background profile
% determine number of coeffs bgd
%
% version 2 constrains intensities for equivalent dips to be the same
% M. Guthrie 3 April 2014
%
% M. Guthrie 7 April 2014, realised I was making an (obvious) mistake by
% adding the transmissions from the two diamonds. Clearly, they should be
% multiplied. I've implemented the change...will see what difference it
% makes.
%
% M. Guthrie 9 April 2014, introduced possibility to refine L2 and also a
% scale factor for calculated dip wavelengths (to account for diamond
% compressibility).
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
nref1 = hkl1.shape[0] # % number of reflections to integrate over
nref2 = hkl2.shape[0] # % number of reflections to integrate over
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 7]
bgd = x[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1]
pkwid2 = x[neqv1 + neqv2 + 10]
    # % if diamond intensities are the same, allow a single scale factor
relsf = x[neqv1 + neqv2 + 11]
if fxsamediam == 1:
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * relsf
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
else:
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
# number of lambda points to calculate over
npt = shftlam.shape[0]
# calculate information for peaks for crystal 1 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# calculate information for peaks for crystal 2 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# generate nptx,nco array containing, x^0,x^1,x^2,...x^nco for
# all nonzero background coefficients
bgdco = np.where(bgd != 0)[0]
nco = bgdco.shape[0]
nonzerobgd = np.zeros(nco)
X = np.ones(shape=(nco, npt))
for i in range(nco):
X[i, :] = shftlam ** (bgd[bgdco[i]] - 1)
nonzerobgd[i] = bgd[bgdco[i]]
# calculate background profile by multiplying this with coefficients
# themselves
bgdprof = nonzerobgd.dot(X)
# bgdprof = np.outer(nonzerobgd, X)
# print bgdprof
# bgdprof = bgdprof[0, :]
# calculate peaks for crystal 1
t1 = np.zeros(npt) # initialise array containing profile
for i in range(nref1):
if pktype == 1:
pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars1[i][0] + pkwid2 * (pkpars1[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect
t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * (
np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2))))
# calculate peaks for crystal 2
t2 = np.zeros(npt) # initialise array containing profile
for i in range(nref2):
if pktype == 1:
pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars2[i][0] + pkwid2 * (pkpars2[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect
t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * (
np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2))))
# calculate final profile
ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2)
# t1 = 1.0;
# t2 = 1.0;
# introduce weighting function and calc chi2...
w = np.ones(len(shftlam)) # equal weighting everywhere
# i1 = np.where(shftlam > 2.15)[0][0]
# j1 = np.where(shftlam > 2.65)[0][0]
# w[i1:j1] = 5 #extra weighting in region of first peaks
# i1 = find(lam>1.68,1,'first');
# j1 = find(lam>2.05,1,'first');
# w(i1:j1)=5; %extra weighting but not too much
resid = (y - ttot) * w
chi2 = np.sum(resid ** 2. / (2 * e ** 2)) / npt
# Print if the user wants verbose minimization
if function_verbose == 'y':
print(('Chi^2 ... ' + str(chi2)))
return chi2
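# Minimal sketch (not used by the fit) of how a single Gaussian dip enters the
# t1/t2 profiles accumulated in SimTrans3()/SimTransOutput3() above. The
# function and argument names here are illustrative only.
def _example_single_dip(shftlam, lam0, mult, calcint, pkwid1, pkwid2):
    '''
    One dip: a negative Gaussian of depth mult*calcint centred on the dip
    wavelength lam0, whose width sig grows quadratically with wavelength,
    exactly as in the loops over nref1/nref2 above (with extScl = 1).
    '''
    sig = pkwid1 * lam0 + pkwid2 * (lam0 ** 2.)
    return -mult * calcint * np.exp(-((shftlam - lam0) ** 2.) / (2 * (sig ** 2)))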
def FitTrans():
'''
Main part of the program
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global run_number
# Customize constraints
cnstang = 1 # if set equal to one, setting angles will be constrained between
# limits defined by anglim1 and anglim2.
    anglim1 = 1.0  # if cnstang == 1, setting angles for D1 can only move by +/- this amount
    anglim2 = 1.0  # if cnstang == 1, setting angles for D2 can only move by +/- this amount
fxsamediam = 1 # ==1 fix intensities for given hkl to be identical for both diamonds
fixmult = 0 # if ==1 peak multipliers are fixed during refinement
initL2 = 0.340 # m dist from centre of instrument to transmission det
delinitL2 = 0.005 # m variation in det position allowed within refinement
difa = -1e-10 # of order e-10
function_verbose = 'n'
# constraint notifications
    if fxsamediam == 1:
        print('*diamonds constrained to have same relative dip intensity*\n')
    else:
        print('*diamonds allowed to have different dip intensities!*')
if cnstang == 1:
print((
'*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(1, anglim1)))
print((
'*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(2, anglim2)))
else:
print('no constraint on setting angles')
if fixmult == 1:
print('*intensity multipliers fixed*')
# Get Input Files...
peaks_file = str(input('Name of file containing diamond peaks: '))
run_number = str(input('Input run number for transmission data: '))
# Build input filenames
# fullfilename_ub1 = str(run_number) + 'UB1.dat' # unused variable
# fullfilename_ub2 = str(run_number) + 'UB2.dat' # unused variable
fullfilename_trans = 'transNorm' + str(run_number) + '.dat'
# get both UB's
UB1, UB2 = UBMG.UBMatrixGen(peaks_file)
# [filename pathname ~] = ...
# uigetfile('*.dat','Choose UB matrix for upstream diamond:');
# fullfilename = [pathname filename];
# fullfilename_ub1 = 'snap13108UB1.dat'
# UB1, remainder = getISAWub(fullfilename_ub1)
# [filename pathname ~] = ...
# uigetfile('*.dat','Choose UB matrix for downstream diamond:');
# fullfilename = [pathname filename];
# fullfilename_ub2 = 'snap13108UB2.dat'
# UB2, remainder = getISAWub(fullfilename_ub2)
# get transmission data...
# [filename,pathname,~] = ...
# uigetfile('*.csv','Choose transmission datafile:');
# fullfilename = [pathname filename];
fullfilename_trans = 'transNorm13148.csv'
TOF, yin, ein = getMANTIDdat_keepbinning(fullfilename_trans)
print(('Starting refinement for: ' + fullfilename_trans))
# set-up simulation
    L1 = 15.0  # distance to centre of instrument in m
# global initial conditions
sf = 1
pktype = 1 # 1 = Gaussian, only current working peaktype
    pkwid = 0.003  # peak width 'sig' is quadratic in lambda
pkwid2 = 2e-4 # sig = pkwid*lam+pkwid2*lam^2
#####################
# Start work...
#####################
# rebin transmission data
lam = 0.0039558 * TOF / (L1 + initL2)
print(('wavelength limits: ' +
str(lam[0]) + ' and ' + str(lam[len(lam) - 1])))
minlam = 0.8
maxlam = 3.5
imin = np.where(lam >= minlam)[0][0]
imax = np.where(lam >= maxlam)[0][0]
lam = lam[imin:imax + 1]
TOF = TOF[imin:imax + 1] # this will be the TOF range used in fit
y = yin[imin:imax + 1]
e = ein[imin:imax + 1]
bgd = np.array([1.0, 0.0])
# generate all allowed diamond hkls:
allhkl = allowedDiamRefs(-7, 7, -7, 7, -7, 7)
# initial conditions for crystal 1
setang1 = np.zeros(3)
# setang1[1:3] = 0.0 # rotation angles applied to refined UB
# use these to calculate resulting peak positions in wavelength
# pkpars1(:,1) is lambda
# pkpars1(:,2) is d-spacing
# pkpars1(:,3) is is 2theta
a, b, c = pkposcalc(allhkl, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# initial conditions for crystal 2
setang2 = np.zeros(3)
# setang2[1:3][0] = 0.0
a, b, c = pkposcalc(allhkl, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# purge all reflections that don't satisfy the Bragg condition and that are
# out of wavelength calculation range...
laminlim = lam[0]
lamaxlim = lam[len(lam) - 1]
nref = len(allhkl)
k1 = 0
k2 = 0
hkl1 = np.zeros(shape=(0, 3))
hkl2 = np.zeros(shape=(0, 3))
for i in range(nref):
if laminlim <= pkpars1[i][0] <= lamaxlim: # reflection in range
hkl1 = np.vstack([hkl1, allhkl[i]])
k1 += 1
if laminlim <= pkpars2[i][0] <= lamaxlim: # reflection in range
hkl2 = np.vstack([hkl2, allhkl[i]])
k2 += 1
print(('There are: ' + str(k1) + ' expected dips due to Crystal 1'))
print(('There are: ' + str(k2) + ' expected dips due to Crystal 2'))
# determine equivalents
# returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
# pkpars1 = np.zeros(shape=(k, 6)) #empty array
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# Calculated ref intensities
pkcalcint1 = pkintread(hkl1, (pkpars1[:, 0:3]))
pkcalcint1 *= 1e-6
pkmult1 = np.ones(neqv1) # intensity multiplier for each group of equivs
# pkpars2 = np.zeros(shape=(l, 6)) #empty array
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# Calculated ref intensities
pkcalcint2 = pkintread(hkl2, (pkpars2[:, 0:3]))
pkcalcint2 *= 1e-6
pkmult2 = np.ones(neqv2) # peak intensity multiplier
relsf = 1.0 # default value
delam = 1.0
L2 = initL2
tbgd = bgd
# Either generate, or read variable array from file
# This is one big array with all the parameters to be refined in it.
prevf = str(input('Look for pars from a previous run ([y]/n)? '))
if prevf == 'n':
x0 = np.hstack((setang1, pkmult1, setang2, pkmult2, sf,
pkwid, tbgd, pkwid2, relsf, delam, L2))
else:
# choose which file to use
parfilename = str(input('Choose file with starting pars: '))
parfullfilename = parfilename
x0 = dlmread(parfullfilename)
tog = str(input('Got parameters from: \n' +
parfilename + '\nUse these ([y]/n)?'))
if tog == 'n':
x0 = np.hstack((setang1, pkmult1, setang2, pkmult2,
sf, pkwid, tbgd, pkwid2, relsf, delam, L2))
print('discarding pars from previous run')
print((str(len(x0)) + ' parameters will be refined'))
nvar = len(x0)
print(('number of variables: ' + str(nvar)))
# nref1 = hkl1.shape[0] # unused variable
# nref2 = hkl2.shape[0] # unused variable
# need to apply correction in the case that pars from previous run had
# fxsamediam==1 and current run also has fxsamediam==1
# to avoid a double multiplication by relsf
if fxsamediam == 1 and x0[neqv1 + neqv2 + 11] != 1:
x0[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[3:4 + neqv2 - 1] / x0[neqv1 + neqv2 + 11]
print(('Diam 2 peak multipliers reset: ' + str(x0[neqv1 + neqv2 + 11])))
# check starting point
chi2 = SimTrans3(x0)
fig_name_start = 'Starting point ' + run_number
plt.figure(fig_name_start)
plt.plot(lam, y, label='Observed')
plt.plot(lam, ttot, label='Calculated')
plt.plot(lam, (y - ttot), label='Residual')
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.grid()
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.show()
print(('Initial chi^2 is: ' + str(chi2)))
showx3(x0)
# Prepare minimization of chi^2 for the calculated profile
# Set-up default constraints...
# inequalities
A = np.zeros(len(x0))
A[0:3] = 0 # setang1 *no constraint
A[3:4 + neqv1 - 1] = -1.0 # pkmult1 Contrains intensities to be positive
A[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2 *no constraint
A[6 + neqv1:7 + neqv1 + neqv2 - 1] = -1.0 # pkmult2
A[6 + neqv1 + neqv2] = -1.0 # sf Scale factor must be +ve
A[7 + neqv1 + neqv2] = -1.0 # pkwid peak width must be +ve
A[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0.0 # bgd *no constraint
A[(neqv1 + neqv2 + 10)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 11)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 12)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 13)] = 0.0 # *no constraint
# equalities
Aeq = np.zeros(len(x0))
Aeq[0:3] = 0.0 # setang1
Aeq[3:4 + neqv1 - 1] = 0.0 # pkmult1
Aeq[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2
Aeq[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.0 # pkmult2
Aeq[6 + neqv1 + neqv2] = 0.0 # sf
Aeq[7 + neqv1 + neqv2] = 0.0 # pkwid
Aeq[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0 # unfixed bgd
Aeq[neqv1 + neqv2 + 10] = 0
Aeq[neqv1 + neqv2 + 11] = 0
Aeq[neqv1 + neqv2 + 12] = 0
Aeq[neqv1 + neqv2 + 13] = 0
# beq = 0 # unused variable
# lower bounds
lb = np.zeros(len(x0))
lb[0:3] = -10 # setang1
lb[3:4 + neqv1 - 1] = 0.5 # pkmult1
lb[4 + neqv1 - 1:6 + neqv1] = -10 # setang2
lb[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.5 # pkmult2
lb[6 + neqv1 + neqv2] = 0.0 # sf
lb[7 + neqv1 + neqv2] = 0.0005 # pkwid
lb[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [0.995, -0.0005] # bgd
lb[neqv1 + neqv2 + 10] = 0.5e-4 # 2nd order pkwid
lb[neqv1 + neqv2 + 11] = 0.9 # rel scale factor must be positive
lb[neqv1 + neqv2 + 12] = 0.9 # min lambda shift
# (m) min L2 dist sample to d/stream detector
lb[neqv1 + neqv2 + 13] = initL2 - delinitL2
# upper bounds
ub = np.zeros(len(x0))
ub[0:3] = 10 # setang1
ub[3:4 + neqv1 - 1] = 50 # pkmult1
ub[4 + neqv1 - 1:6 + neqv1] = 10 # setang2
ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = 50 # pkmult2
ub[6 + neqv1 + neqv2] = 50 # sf
ub[7 + neqv1 + neqv2] = 0.01 # pkwid
ub[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [1.005, 0.0005] # bgd
ub[neqv1 + neqv2 + 10] = 1.0e-2 # 2nd order pkwid
    # relative dip-intensity scale factor for diamond 2 (see lb above)
    ub[neqv1 + neqv2 + 11] = 1.1
ub[neqv1 + neqv2 + 12] = 1.1 # max lambda shift
# (m) max L2 dist sample to d/stream detector
ub[neqv1 + neqv2 + 13] = initL2 + delinitL2
# Customize constraints
if cnstang == 1:
# diamond 1
lb[0] = x0[0] - anglim1
lb[1] = x0[1] - anglim1
lb[2] = x0[2] - anglim1
ub[0] = x0[0] + anglim1
ub[1] = x0[1] + anglim1
ub[2] = x0[2] + anglim1
# diamond 2
lb[3 + neqv1] = x0[3 + neqv1] - anglim2
lb[4 + neqv1] = x0[4 + neqv1] - anglim2
lb[5 + neqv1] = x0[5 + neqv1] - anglim2
ub[3 + neqv1] = x0[3 + neqv1] + anglim2
ub[4 + neqv1] = x0[4 + neqv1] + anglim2
ub[5 + neqv1] = x0[5 + neqv1] + anglim2
if fixmult == 1:
lb[3:4 + neqv1 - 1] = x0[3:4 + neqv1 - 1] - 0.01
lb[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[6 +
neqv1:7 + neqv1 + neqv2 - 1] - 0.01
ub[3:4 + neqv1 - 1] = x0[3:4 + neqv1 - 1] + 0.01
ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[6 +
neqv1:7 + neqv1 + neqv2 - 1] + 0.01
prompt = str(input('Enter anything to begin refinement...'))
print('Refining...\nMight take quite a long time...')
max_number_iterations = int(
input('Maximum number of iterations for minimization: '))
function_verbose = str(input('Verbose minimization ([y]/n): '))
# make dictionary holding constraints for minimization
# equalities (all must equal 0) and inequalities
cons = []
for i in range(len(x0)):
cons.append({'type': 'ineq', 'fun': lambda x: -A[i] * x[i]})
cons = tuple(cons)
# bounds have to be list of tuples with (lower, upper) for each parameter
bds = np.vstack((lb, ub)).T
res = sp.minimize(SimTrans3, x0, method='SLSQP', bounds=bds, constraints=cons,
options={'disp': True, 'maxiter': max_number_iterations})
# tolerance limits to put in minimization if you want so : 'ftol': 0.001
x = np.array(res.x)
#
# minimisation...
#
# figure(2)
# options = optimoptions(@fmincon,'Algorithm','interior-point', 'Display','off', 'MaxFunEvals',10000*nvar,'PlotFcns'
# @optimplotfval, 'MaxIter',4000)
# x, fval, exitflag, output = fmincon(@SimTrans3,x0,A,b,[],[],Aeq beq
# lb,ub,[],options)
# necessary to update these here...
if fxsamediam == 1:
# set peak parameters for second diamond to equal those of first
# but scaled by relsf
# len(x)
# neqv1+neqv2+11
# x[neqv1+neqv2+11]
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * x[neqv1 + neqv2 + 11]
print(('Diam 2 peak multipliers reset with factor: ' + str(x[neqv1 + neqv2 + 11])))
else:
# label ensuring I know that run did not use fxsamediam
x[neqv1 + neqv2 + 11] = 1.0
print('AFTER REFINEMENT')
showx3(x)
####
# output final information
####
# calculate chi2 for best fit
chi2 = SimTrans3(x)
print(('Final Chi2 = ' + str(chi2)))
# determine output wavelength range using refined L2 value
# lamscale = x[neqv1 + neqv2 + 12] # unused variable
L2 = x[neqv1 + neqv2 + 13]
outlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
fig_name_final = 'Final result ' + run_number
plt.figure(fig_name_final)
plt.plot(outlam, y, 'k', label='Observed')
plt.plot(outlam, ttot, 'r', label='Calculated')
plt.plot(outlam, (y - ttot), 'b', label='Final residuals')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.text(2.1, 0.5, 'CHI^2=' + str(chi2))
plt.grid()
for i in range(len(pkpars1)):
plt.arrow(pkpars1[i, 0] * delam, 1.1, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
for i in range(len(pkpars2)):
plt.arrow(pkpars2[i, 0] * delam, 1.15, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
plt.xlim(1.0, 2.7)
plt.ylim(ymax=1.2)
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.show()
prompt = str(input('output best fit to file ([y]/n): '))
if prompt == 'n':
print('Ending')
else:
fitparname = str(run_number) + '.best_fit_pars3.dat'
np.savetxt(fitparname, x, delimiter=',')
print(('output parameters written to file: \n' + fitparname))
ofilename = str(run_number) + '.fitted3.dat'
SimTransOutput3(ofilename, x) # generate output file with fitted data
if __name__ == "__main__":
FitTrans()
| gpl-3.0 |
haozhangphd/genx-py3 | genx/event_handlers.py | 1 | 46539 | '''
Just a library of the different event handlers needed in the GUI
File started by: Matts Bjorck
$Rev:: $: Revision of last commit
$Author:: $: Author of last commit
$Date:: $: Date of last commit
'''
import genx.version
__version__ = genx.version.version
import sys
from six.moves import _thread
import time
import wx, wx.adv, os, traceback
from wx.lib.wordwrap import wordwrap
import webbrowser
import numpy as np
import genx.help
import genx.model as modellib
import genx.solvergui
import genx.filehandling as io
manual_url = 'http://genx.sourceforge.net/doc/'
homepage_url = 'http://genx.sf.net'
def get_pages(frame):
pages = [frame.plot_data, frame.plot_fom, frame.plot_pars,\
frame.plot_fomscan]
return pages
def set_title(frame):
filepath, filename = os.path.split(frame.model.filename)
if filename != '':
if frame.model.saved:
frame.SetTitle(filename + ' - ' + filepath + ' - GenX '\
+ __version__)
else:
frame.SetTitle(filename + '* - ' + filepath + ' - GenX '\
+ __version__)
else:
frame.SetTitle('GenX ' + __version__)
def models_changed(frame, event):
'''models_changed(frame, event) --> None
callback when something has changed in the model so that the
user can be made aware that the model needs saving.
'''
try:
frame.model.saved = not event.permanent_change
except AttributeError:
frame.model.saved = False
else:
frame.plugin_control.OnGridChanged(None)
set_title(frame)
def new(frame, event):
'''
new(frame, event) --> None
Event handler for creating a new model
'''
#print "Event handler: new"
if not frame.model.saved:
ans = ShowQuestionDialog(frame, 'If you continue any changes in'
' your model will not be saved.',
'Model not saved')
if not ans:
return
# Reset the model - remove everything from the previous model
frame.model.new_model()
# Update all components so all the traces are gone.
_post_new_model_event(frame, frame.model, desc='Fresh model')
frame.plugin_control.OnNewModel(None)
frame.main_frame_statusbar.SetStatusText('New model created', 1)
set_title(frame)
frame.model.saved = True
def open(frame, event):
'''
open(frame, event) --> None
Event handler for opening a model file...
'''
# Check so the model is saved before quitting
if not frame.model.saved:
ans = ShowQuestionDialog(frame, 'If you continue any changes in'
' your model will not be saved.',
'Model not saved')
if not ans:
return
if sys.version_info.major == 3:
dlg = wx.FileDialog(frame, message="Open", defaultFile="",\
wildcard="GenX File and HDF5 GenX File (*.gx, *.gx3, *.hgx)|*.gx;*.gx3;*.hgx",\
style=wx.FD_OPEN #| wx.FD_CHANGE_DIR
)
else:
dlg = wx.FileDialog(frame, message="Open", defaultFile="",\
wildcard="GenX File and HDF5 GenX File (*.gx, *.hgx)|*.gx;*.hgx",\
style=wx.FD_OPEN #| wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
open_model(frame, path)
dlg.Destroy()
def open_model(frame, path):
#print "open_model"
frame.model.new_model()
frame.paramter_grid.PrepareNewModel()
# Update all components so all the traces are gone.
#_post_new_model_event(frame, frame.model)
try:
io.load_file(path, frame.model, frame.solver_control.optimizer, frame.config)
except modellib.IOError as e:
ShowModelErrorDialog(frame, e.__str__())
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
        print('Error in loading the file ', path, '. Python traceback:\n ', val)
ShowErrorDialog(frame, 'Could not open the file. Python Error:\n%s' % (val,))
return
try:
[p.ReadConfig() for p in get_pages(frame)]
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
        print('Error in loading config for the plots. Python traceback:\n ', val)
ShowErrorDialog(frame, 'Could not read the config for the plots. Python Error:\n%s' % (val,))
try:
frame.paramter_grid.ReadConfig()
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
        print('Error in loading config for parameter grid. Python traceback:\n ', val)
ShowErrorDialog(frame, 'Could not read the config for the parameter grid. Python Error:\n%s' % (val,))
else:
# Update the Menu choice
frame.mb_view_grid_slider.Check(frame.paramter_grid.GetValueEditorSlider())
# Letting the plugin do their stuff...
try:
frame.plugin_control.OnOpenModel(None)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, 'Problems when plugins processed model.'\
' Python Error:\n%s'%(val,))
frame.main_frame_statusbar.SetStatusText('Model loaded from file',\
1)
# Post an event to update everything else
_post_new_model_event(frame, frame.model)
# Needs to put it to saved since all the widgets will have
# been updated
frame.model.saved = True
set_title(frame)
def on_new_model(frame, event):
'''
on_new_model(frame, event) --> None
Callback for NEW_MODEL event. Used to update the script for
a new model i.e. put the string to the correct value.
'''
#print "Callback: on_new_model"
# Set the string in the script_editor
frame.script_editor.SetText(event.GetModel().get_script())
# Let the solvergui do its loading and updating:
frame.solver_control.ModelLoaded()
# Lets update the mb_use_toggle_show Menu item
frame.mb_use_toggle_show.Check(frame.config.get_boolean('data handling',
'toggle show'))
try:
val = frame.config.get_boolean('parameter grid', 'auto sim')
except io.OptionError:
print('Could not locate option parameters.auto sim')
frame.mb_fit_autosim.Check(True)
else:
frame.mb_fit_autosim.Check(val)
    # Let other event handlers receive the event as well
event.Skip()
def update_for_save(frame):
"""Updates the various objects for a save"""
frame.model.set_script(frame.script_editor.GetText())
# Save the current state of autosim to the config
frame.config.set('parameter grid', 'auto sim', frame.mb_fit_autosim.IsChecked())
def save(frame, event):
'''
save(frame, event) --> None
Event handler for saving a model file ...
'''
update_for_save(frame)
fname = frame.model.get_filename()
# If model hasn't been saved
if fname == '':
# Proceed with calling save as
save_as(frame, event)
else:
# If it has been saved just save it
try:
io.save_file(fname, frame.model, frame.solver_control.optimizer, frame.config)
except modellib.IOError as e:
ShowModelErrorDialog(frame, e.__str__())
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, 'Could not save the file. Python Error: \n%s' % (val,))
set_title(frame)
frame.main_frame_statusbar.SetStatusText('Model saved to file', 1)
def save_as(frame, event):
'''save_as(frame, event) --> None
Event handler for save as ...
'''
if sys.version_info.major == 3:
dlg = wx.FileDialog(frame, message="Save As", defaultFile="",
wildcard="HDF5 GenX File (*.hgx)|*.hgx|GenX File (*.gx3)|*.gx3",
style=wx.FD_SAVE #| wx.FD_CHANGE_DIR
)
else:
dlg = wx.FileDialog(frame, message="Save As", defaultFile="",
wildcard="HDF5 GenX File (*.hgx)|*.hgx|GenX File (*.gx)|*.gx",
style=wx.FD_SAVE #| wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
update_for_save(frame)
fname = dlg.GetPath()
base, ext = os.path.splitext(fname)
if ext == '':
ext = '.hgx'
fname = base + ext
result = True
if os.path.exists(fname):
filepath, filename = os.path.split(fname)
result = ShowQuestionDialog(frame, 'The file %s already exists. Do you wish to overwrite it?' % filename,
'Overwrite?')
if result:
try:
io.save_file(fname, frame.model, frame.solver_control.optimizer,\
frame.config)
except modellib.IOError as e:
ShowModelErrorDialog(frame, e.__str__())
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, 'Could not save the file. Python Error:\n%s'%(val,))
set_title(frame)
dlg.Destroy()
def export_data(frame, event):
'''export_data(frame, event) --> None
exports the data to one file per data set with a basename with
extention given by a save dialog.
'''
dlg = wx.FileDialog(frame, message="Export data", defaultFile="",\
wildcard="Dat File (*.dat)|*.dat",\
style=wx.FD_SAVE | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
try:
frame.model.export_data(dlg.GetPath())
except modellib.IOError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText(\
'Error when exporting data', 1)
except Exception as e:
ShowErrorDialog(frame, str(e), 'export data - model.export_data')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
else:
frame.main_frame_statusbar.SetStatusText('Data exported', 1)
dlg.Destroy()
def export_script(frame, event):
'''export_script(frame, event) --> None
Exports the script to a python file given by a filedialog.
'''
dlg = wx.FileDialog(frame, message="Export data", defaultFile="",\
wildcard="Python File (*.py)|*.py",\
style=wx.FD_SAVE | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
base, ext = os.path.splitext(fname)
if ext == '':
ext = '.py'
fname = base + ext
result = True
if os.path.exists(fname):
filepath, filename = os.path.split(fname)
result = ShowQuestionDialog(frame, \
'The file %s already exists. Do you wish to overwrite it?'%filename\
, 'Overwrite?')
if result:
try:
#frame.model.export_script(dlg.GetPath())
frame.model.export_script(fname)
except modellib.IOError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText(\
'Error when exporting script', 1)
return
except Exception as e:
ShowErrorDialog(frame, str(e),\
'export script - model.export_script')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
return
else:
frame.main_frame_statusbar.SetStatusText(\
'Script exported to file', 1)
dlg.Destroy()
def export_table(frame, event):
'''export_table(frame, event) --> None
Exports the table to a dat file given by a filedialog.
'''
dlg = wx.FileDialog(frame, message="Export data", defaultFile="",\
wildcard="Table File (*.tab)|*.tab",\
style=wx.FD_SAVE | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
base, ext = os.path.splitext(fname)
if ext == '':
ext = '.tab'
fname = base + ext
result = True
if os.path.exists(fname):
filepath, filename = os.path.split(fname)
result = ShowQuestionDialog(frame, \
'The file %s already exists. Do you wish to overwrite it?'%filename\
, 'Overwrite?')
if result:
try:
#frame.model.export_table(dlg.GetPath())
frame.model.export_table(fname)
except modellib.IOError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText(\
'Error when exporting table', 1)
return
except Exception as e:
ShowErrorDialog(frame, str(e),\
'export table - model.export_table')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
return
else:
frame.main_frame_statusbar.SetStatusText(\
'Table exported to file', 1)
dlg.Destroy()
def import_script(frame, event):
'''import_script(frame, event) --> None
imports a script from the file given by a file dialog box
'''
dlg = wx.FileDialog(frame, message="Import script", defaultFile="",\
wildcard="Python files (*.py)|*.py|All files (*.*)|*.*",\
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
try:
frame.model.import_script(dlg.GetPath())
#frame.model.import_script(fname)
except modellib.IOError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText(\
'Error when importing script', 1)
return
except Exception as e:
ShowErrorDialog(frame, str(e),\
'import script - model.import_script')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
return
try:
frame.plugin_control.OnOpenModel(None)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, 'Problems when plugins processed model.'\
' Python Error:\n%s'%(val,))
else:
frame.main_frame_statusbar.SetStatusText(\
'Script imported from file', 1)
dlg.Destroy()
# Post event to tell that the model has changed
_post_new_model_event(frame, frame.model)
def import_data(frame, event):
''' import_data(frame, event) -->None
callback to import data into the program
'''
# Reuse of the callback in the datalist.DataController
try:
frame.data_list.eh_tb_open(event)
except Exception as e:
ShowErrorDialog(frame, str(e),\
'import data - data_list.eh_tb_open')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
return
def import_table(frame, event):
'''import_table(frame, event) --> None
imports a table from the file given by a file dialog box
'''
dlg = wx.FileDialog(frame, message="Import script", defaultFile="",\
wildcard="Table File (*.tab)|*.tab|All files (*.*)|*.*",\
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
try:
frame.model.import_table(dlg.GetPath())
except modellib.IOError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText(\
'Error when importing script', 1)
dlg.Destroy()
return
except Exception as e:
ShowErrorDialog(frame, str(e),\
'import script - model.import_script')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
dlg.Destroy()
return
dlg.Destroy()
    # Post event to tell that the model has changed
_post_new_model_event(frame, frame.model)
frame.main_frame_statusbar.SetStatusText('Table imported from file', 1)
def parameter_value_changed(frame, event):
""" Event handler for when a value of a parameter in the grid has been updated.
:param frame:
:param event:
:return:
"""
frame.simulation_queue_counter += 1
if frame.mb_fit_autosim.IsChecked() and not frame.flag_simulating:
_thread.start_new_thread(simulation_loop, (frame,))
def simulation_loop(frame):
""" Simulation loop for threading to increase the speed of the interactive simulations
:param frame:
:return:
"""
frame.flag_simulating = True
while frame.simulation_queue_counter > 0:
do_simulation(frame)
time.sleep(0.1)
frame.simulation_queue_counter = min(1, frame.simulation_queue_counter - 1)
frame.flag_simulating = False
def evaluate(frame, event):
'''evaluate(frame, event) --> None
    Event handler for only evaluating the Sim function - no recompiling
'''
frame.flag_simulating = True
frame.main_frame_statusbar.SetStatusText('Simulating...', 1)
    # Compile is not necessary when using simulate...
#frame.model.compile_script()
try:
frame.model.simulate(compile = False)
except modellib.GenericError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Error in simulation', 1)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, val)
frame.main_frame_statusbar.SetStatusText('Fatal Error - simulate', 1)
else:
_post_sim_plot_event(frame, frame.model, 'Simulation')
        frame.main_frame_statusbar.SetStatusText('Simulation Successful', 1)
frame.plugin_control.OnSimulate(None)
frame.flag_simulating = False
def simulate(frame, event):
'''
simulate(frame, event) --> None
Event handler for simulation.
'''
frame.flag_simulating = True
do_simulation(frame)
set_possible_parameters_in_grid(frame)
frame.flag_simulating = False
def do_simulation(frame):
# Just a debugging output...
# print frame.script_editor.GetText()
frame.main_frame_statusbar.SetStatusText('Simulating...', 1)
frame.model.set_script(frame.script_editor.GetText())
    # Compile is not necessary when using simulate...
#frame.model.compile_script()
try:
frame.model.simulate()
except modellib.GenericError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Error in simulation', 1)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, val)
frame.main_frame_statusbar.SetStatusText('Fatal Error - simulate', 1)
else:
_post_sim_plot_event(frame, frame.model, 'Simulation')
frame.plugin_control.OnSimulate(None)
        frame.main_frame_statusbar.SetStatusText('Simulation Successful', 1)
def set_possible_parameters_in_grid(frame):
# Now we should find the parameters that we can use to
# in the grid
try:
pardict = frame.model.get_possible_parameters()
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
#ShowErrorDialog(frame, val)
ShowErrorDialog(frame, val,\
'simulate - model.get_possible_parameters')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 0)
return
try:
frame.paramter_grid.SetParameterSelections(pardict)
except Exception as e:
ShowErrorDialog(frame, str(e),\
'simulate - parameter_grid.SetParameterSelection')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 0)
return
# Set the function for which the parameter can be evaluated with
frame.paramter_grid.SetEvalFunc(frame.model.eval_in_model)
def start_fit(frame, event):
'''start_fit(frame, event) --> None
Event handler to start fitting
'''
if frame.model.compiled:
try:
frame.solver_control.StartFit()
except modellib.GenericError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Error in fitting', 1)
except Exception as e:
ShowErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
else:
frame.main_frame_statusbar.SetStatusText('Fitting starting ...', 1)
else:
ShowNotificationDialog(frame, 'The script is not compiled, do a'\
' simulation before you start fitting.')
def stop_fit(frame, event):
'''stop_fit(frame, event) --> None
Event handler to stop the fitting routine
'''
frame.solver_control.StopFit()
def resume_fit(frame, event):
'''resume_fit(frame, event) --> None
    Event handler to resume the fitting routine. No initialization.
'''
if frame.model.compiled:
try:
frame.solver_control.ResumeFit()
except modellib.GenericError as e:
ShowModelErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Error in fitting', 1)
except Exception as e:
ShowErrorDialog(frame, str(e))
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
else:
frame.main_frame_statusbar.SetStatusText('Fitting starting ...',1)
else:
ShowNotificationDialog(frame, 'The script is not compiled, do a'\
' simulation before you start fitting.')
def calculate_error_bars(frame, evt):
'''calculate_error_bars(frame, evt) --> None
callback to calculate the error bars on the data.
'''
try:
error_values = frame.solver_control.CalcErrorBars()
except genx.solvergui.ErrorBarError as e:
ShowNotificationDialog(frame, str(e))
except Exception as e:
ShowErrorDialog(frame, str(e), 'solvergui - CalcErrorBars')
frame.main_frame_statusbar.SetStatusText('Fatal Error', 1)
else:
frame.model.parameters.set_error_pars(error_values)
frame.paramter_grid.SetParameters(frame.model.parameters)
frame.main_frame_statusbar.SetStatusText('Errorbars calculated', 1)
def scan_parameter(frame, row):
''' scan_parameter(frame, row) --> None
Scans the parameter in row row [int] from max to min in the number
of steps given by dialog input.
'''
if not frame.model.is_compiled():
ShowNotificationDialog(frame, 'Please conduct a simulation before' +\
' scanning a parameter. The script needs to be compiled.')
return
try:
step = wx.GetNumberFromUser(message='Input the number of evaluation points for the scan',\
prompt='Steps', caption='', value=50, min=2, max=1000)
frame.main_frame_statusbar.SetStatusText('Scanning parameter', 1)
x, y = frame.solver_control.ScanParameter(row, step)
fs, pars = frame.model.get_sim_pars()
bestx = frame.model.parameters.get_data()[row][1]
besty = frame.model.fom
frame.plot_fomscan.SetPlottype('scan')
frame.plot_fomscan.Plot((x, y, bestx, besty,\
frame.solver_control.fom_error_bars_level))
frame.plot_notebook.SetSelection(3)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, val)
frame.main_frame_statusbar.SetStatusText('Fatal Error - scan fom', 1)
else:
frame.main_frame_statusbar.SetStatusText('Scanning finished', 1)
def project_fom_parameter(frame, row):
'''project_fom_parameter(frame, row) --> None
Plots the project fom given by the row row [int]
'''
if not frame.solver_control.IsFitted():
ShowNotificationDialog(frame, 'Please conduct a fit before' +
                               ' projecting a parameter. The script needs to be compiled and foms have'
+ ' to be collected.')
return
frame.main_frame_statusbar.SetStatusText('Trying to project fom', 1)
try:
x, y = frame.solver_control.ProjectEvals(row)
if len(x) == 0 or len(y) == 0:
ShowNotificationDialog(frame, 'Please conduct a fit before' +
' projecting a parameter. The script needs to be compiled and foms have'
+ ' to be collected.')
return
elif frame.model.fom is None or np.isnan(frame.model.fom):
ShowNotificationDialog(frame, 'The model must be simulated (FOM is not a valid number)')
return
fs, pars = frame.model.get_sim_pars()
bestx = pars[row]
besty = frame.model.fom
frame.plot_fomscan.SetPlottype('project')
frame.plot_fomscan.Plot((x, y, bestx, besty,\
frame.solver_control.fom_error_bars_level))
frame.plot_notebook.SetSelection(3)
except Exception as e:
outp = io.StringIO()
traceback.print_exc(200, outp)
val = outp.getvalue()
outp.close()
ShowErrorDialog(frame, val)
frame.main_frame_statusbar.SetStatusText('Fatal Error - project fom', 1)
else:
frame.main_frame_statusbar.SetStatusText('Projected fom plotted', 1)
def on_optimizer_settings(frame, event):
'''on_optimizer_settings(self, event) --> None
Show the settings dialog for the optimizer
'''
#try:
frame.solver_control.ParametersDialog(frame)
#except Exception as e:
# raise e
def on_data_loader_settings(frame, event):
'''on_data_loader_settings(frame, event) --> None
Show the data_loader settings dialog. Allow the user to change the
data loader.
'''
frame.data_list.DataLoaderSettingsDialog()
def quit(frame, event):
'''quit(frame, event) --> None
Quit the program
'''
# Check so the model is saved before quitting
if not frame.model.saved:
ans = ShowQuestionDialog(frame, 'If you continue any changes in'
' your model will not be saved.',
'Model not saved')
if ans:
#frame.Destroy()
frame.parent.ExitMainLoop()
else:
#frame.Destroy()
frame.parent.ExitMainLoop()
def status_text(frame, event):
'''status_text(frame, event) --> None
Print a status text in the window. event should have a string
member text. This will display the message in the status bar.
'''
frame.main_frame_statusbar.SetStatusText(event.text, 1)
def fom_value(frame, event):
'''fom_value(frame, event) --> None
Callback to update the fom_value displayed by the gui
'''
fom_value = event.model.fom
fom_name = event.model.fom_func.__name__
if fom_value:
frame.main_frame_fom_text.SetLabel(' FOM %s: %.4e'%(fom_name,fom_value))
else:
frame.main_frame_fom_text.SetLabel(' FOM %s: None'%(fom_name))
def point_pick(frame, event):
'''point_pick(frame, event) --> None
Callback for the picking of a data point in a plotting window.
This will display the message in the status bar.
'''
frame.main_frame_statusbar.SetStatusText(event.text, 2)
def on_zoom_check(frame, event):
    '''on_zoom_check(frame, event) --> None
Takes care of clicks on the toolbar zoom button and the menu item zoom.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
zoom_state = not pages[sel].GetZoom()
pages[sel].SetZoom(zoom_state)
frame.main_frame_toolbar.ToggleTool(10009, zoom_state)
frame.mb_view_zoom.Check(zoom_state)
def zoomall(frame, event):
'''zoomall(self, event) --> None
Zoom out and show all data points
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
tmp = pages[sel].GetAutoScale()
pages[sel].SetAutoScale(True)
pages[sel].AutoScale()
pages[sel].SetAutoScale(tmp)
pages[sel].AutoScale()
def set_yscale(frame, type):
'''set_yscale(frame, type) --> None
Set the y-scale of the current plot. type should be linear or log, strings.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].SetYScale(type)
def set_xscale(frame, type):
'''set_xscale(frame, type) --> None
Set the x-scale of the current plot. type should be linear or log, strings.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].SetXScale(type)
def on_grid_slider_check(frame, event):
"""Change the state of the grid value input, either as slider or as a number.
:param frame:
:param event:
:return:
"""
frame.paramter_grid.SetValueEditorSlider(frame.mb_view_grid_slider.IsChecked())
#print frame.paramter_grid.get_toggle_slider_tool_state()
frame.paramter_grid.toggle_slider_tool(frame.mb_view_grid_slider.IsChecked())
frame.paramter_grid.Refresh()
def on_autoscale(frame, event):
'''on_autoscale(frame, event) --> None
Toggles the autoscale of the current plot.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].SetAutoScale(not pages[sel].GetAutoScale())
def plot_settings_changed(frame, event):
    '''plot_settings_changed(frame, event) --> None
Callback for the settings change event for the current plot
- change the toggle for the zoom icon and change the menu items.
'''
frame.main_frame_toolbar.ToggleTool(10009, event.zoomstate)
frame.mb_view_zoom.Check(event.zoomstate)
if event.yscale == 'log':
frame.mb_view_yscale_log.Check(True)
elif event.yscale == 'linear':
frame.mb_view_yscale_lin.Check(True)
if event.xscale == 'log':
frame.mb_view_xscale_log.Check(True)
elif event.xscale == 'linear':
frame.mb_view_xscale_lin.Check(True)
frame.mb_view_autoscale.Check(event.autoscale)
def plot_page_changed(frame, event):
'''plot_page_changed(frame, event) --> None
Callback for page change in plot notebook. Changes the state of
the zoom toggle button.
'''
sel = event.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
zoom_state = pages[sel].GetZoom()
# Set the zoom button to the correct value
frame.main_frame_toolbar.ToggleTool(10009, zoom_state)
frame.mb_view_zoom.Check(zoom_state)
yscale = pages[sel].GetYScale()
if yscale == 'log':
frame.mb_view_yscale_log.Check(True)
elif yscale == 'linear':
frame.mb_view_yscale_lin.Check(True)
xscale = pages[sel].GetXScale()
if xscale == 'log':
            frame.mb_view_xscale_log.Check(True)
        elif xscale == 'linear':
            frame.mb_view_xscale_lin.Check(True)
def print_plot(frame, event):
'''print_plot(frame, event) --> None
prints the current plot in the plot notebook.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].Print()
def print_preview_plot(frame, event):
'''print_preview_plot(frame, event) --> None
prints a preview of the current plot int the plot notebook.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].PrintPreview()
def print_parameter_grid(frame, event):
''' print_parameter_grid(frame, event) --> None
Prints the table of parameters that have been fitted.
'''
frame.paramter_grid.Print()
#genxprint.TablePanel(frame)
def print_preview_parameter_grid(frame, event):
    ''' print_preview_parameter_grid(frame, event) --> None
    Print preview of the table of parameters that have been fitted.
'''
frame.paramter_grid.PrintPreview()
def copy_graph(frame, event):
'''copy_graph(self, event) --> None
Callback that copies the current graph in the plot notebook to
the clipboard.
'''
sel = frame.plot_notebook.GetSelection()
pages = get_pages(frame)
if sel < len(pages):
pages[sel].CopyToClipboard()
def copy_table(frame, event):
'''copy_table(frame, event) --> None
Copies the table as ascii text to the clipboard
'''
ascii_table = frame.paramter_grid.table.pars.get_ascii_output()
text_table=wx.TextDataObject(ascii_table)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(text_table)
wx.TheClipboard.Close()
def copy_sim(frame, event):
'''copy_sim(frame, event) --> None
Copies the simulation and the data to the clipboard. Note that this
copies ALL data.
'''
text_string = frame.model.get_data_as_asciitable()
text = wx.TextDataObject(text_string)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(text)
wx.TheClipboard.Close()
def on_findreplace(frame, event):
'''Show the find and replace dialog box.
'''
frame.findreplace_dlg.Show(True)
def on_find_event(frame, event):
'''callback for find events - coupled to the script
'''
evtype = event.GetEventType()
def find():
find_str = event.GetFindString()
##print frame.findreplace_data.GetFlags()
flags = event.GetFlags()
if flags & 1:
##print "Searching down"
pos = frame.script_editor.SearchNext(flags, find_str)
else:
##print "Searching up"
pos = frame.script_editor.SearchPrev(flags, find_str)
if pos == -1:
frame.main_frame_statusbar.SetStatusText(\
'Could not find text %s'%find_str, 1)
return pos
def replace():
replace_str = event.GetReplaceString()
frame.script_editor.ReplaceSelection(replace_str)
# Deal with the different cases
if evtype == wx.wxEVT_COMMAND_FIND:
frame.script_editor.SearchAnchor()
find()
elif evtype == wx.wxEVT_COMMAND_FIND_NEXT:
pnew = frame.script_editor.GetSelectionEnd()
##print pnew
frame.script_editor.GotoPos(pnew)
frame.script_editor.SetAnchor(pnew)
frame.script_editor.SearchAnchor()
##print 'Finding next'
find()
elif evtype == wx.wxEVT_COMMAND_FIND_REPLACE:
#print 'find and replace'
# If we do not have found text already
# or if we have marked other text by mistake...
if frame.script_editor.GetSelectedText() != \
event.GetFindString():
find()
# We already have found and marked text that we should
# replace
else:
frame.script_editor.ReplaceSelection(\
event.GetReplaceString())
# Find a new text to replace
find()
elif evtype == wx.wxEVT_COMMAND_FIND_REPLACE_ALL:
#print 'find and replace all'
if frame.script_editor.GetSelectedText() != \
event.GetFindString():
pos = find()
i = 0
while pos != -1:
frame.script_editor.ReplaceSelection(\
event.GetReplaceString())
i += 1
pos = find()
frame.main_frame_statusbar.SetStatusText(\
            'Replaced %d occurrences of %s'%(i,\
event.GetFindString()), 1)
##else:
## ShowErrorDialog(frame, 'Faulty event supplied in find and'\
## ' repalce functionallity', 'on_find_event')
# This will scroll the editor to the right position so we can see
# the text
frame.script_editor.EnsureCaretVisible()
def change_data_grid_view(frame, event):
'''change_data_grid_view(frame, event) --> None
change the data displayed in the grid...
'''
#print event.GetSelection()
dataset = frame.model.data[event.GetSelection()]
rows = frame.data_grid.GetNumberRows()
new_rows = max(len(dataset.x), len(dataset.y),\
len(dataset.x_raw), len(dataset.y_raw))
frame.data_grid.DeleteRows(numRows = rows)
frame.data_grid.AppendRows(new_rows)
[[frame.data_grid.SetCellValue(row, col, '-') for col in range(6)]\
for row in range(new_rows)]
[frame.data_grid.SetCellValue(row, 0, '%.3e'%dataset.x_raw[row])\
for row in range(len(dataset.x_raw))]
[frame.data_grid.SetCellValue(row, 1, '%.3e'%dataset.y_raw[row])\
for row in range(len(dataset.y_raw))]
[frame.data_grid.SetCellValue(row, 2, '%.3e'%dataset.error_raw[row])\
for row in range(len(dataset.error_raw))]
[frame.data_grid.SetCellValue(row, 3, '%.3e'%dataset.x[row])\
for row in range(len(dataset.x))]
[frame.data_grid.SetCellValue(row, 4, '%.3e'%dataset.y[row])\
for row in range(len(dataset.y))]
[frame.data_grid.SetCellValue(row, 5, '%.3e'%dataset.error[row])\
for row in range(len(dataset.error))]
def update_data_grid_choice(frame, event):
'''update_data_grid_choice(frame, event) --> None
Updates the choices of the grids to display from the data.
'''
data = event.GetData()
names = [data_set.name for data_set in data]
#frame.data_grid_choice.SetItems(names)
frame.data_grid_choice.Clear()
frame.data_grid_choice.AppendItems(names)
event.Skip()
def update_data(frame, event):
'''update_data(frame, event) --> None
callback for updating data, right now in the plugins
'''
frame.plugin_control.OnDataChanged(event)
def fom_help(frame, event):
'''Show a help dialog for information about the different fom.
'''
dlg = genx.help.PluginHelpDialog(frame, 'fom_funcs')
dlg.Show()
def models_help(frame, event):
'''models_help(frame, event) --> None
Show a help dialog for information about the different models.
'''
dlg = genx.help.PluginHelpDialog(frame,'models')
dlg.Show()
def plugins_help(frame, event):
'''plugins_help(frame, event) --> None
Show a help dialog for information about the different plugins.
'''
dlg = genx.help.PluginHelpDialog(frame,'plugins.add_ons')
dlg.Show()
def data_loaders_help(frame, event):
'''data_loaders_help(frame, event) --> None
Show a help dialog for information about the different data_loaders.
'''
dlg = genx.help.PluginHelpDialog(frame,'plugins.data_loaders')
dlg.Show()
def show_manual(frame, event):
'''show_manual(frame, event) --> None
Callback to show the manual
'''
#ShowNotificationDialog(frame, 'There is no manual yet!')
webbrowser.open_new(manual_url)
def show_homepage(frame, event):
'''show_homepage(frame, event) --> None
Callback to show the homepage
'''
webbrowser.open_new(homepage_url)
def show_about_box(frame, event):
'''show_about_box(frame, event) --> None
Show an about box about GenX with some info...
'''
import numpy, scipy, matplotlib, platform
try:
import cython
    except ImportError:
cython_version = 'Not installed'
else:
cython_version = cython.__version__
info = wx.adv.AboutDialogInfo()
info.Name = "GenX"
info.Version = __version__
info.Copyright = "(C) 2008 Matts Bjorck"
info.Description = wordwrap(
"GenX is a multipurpose refinement program using the differential "
"evolution algorithm. It is developed mainly for refining x-ray reflectivity "
"and neutron reflectivity data."
"\n\nThe versions of the mandatory libraries are:\n"
"Python: %s, wxPython: %s, Numpy: %s, Scipy: %s, Matplotlib: %s"
"\nThe non-mandatory but useful package: cython: %s"%(platform.python_version(), wx.__version__,\
numpy.version.version, scipy.version.version,\
matplotlib.__version__, cython_version),
500, wx.ClientDC(frame))
info.WebSite = ("http:////genx.sourceforge.net", "GenX homepage")
# No developers yet
#info.Developers = []
#head, tail = os.path.split(__file__)
#license_text = file(head + '/LICENSE.txt','r').read()
#license_text = file(_path + 'LICENSE.txt','r').read()
#info.License = license_text#wordwrap(license_text, 500, wx.ClientDC(self))
info.Licence = wordwrap('This program is free software: you can redistribute it and/or modify '
'it under the terms of the GNU General Public License as published by '
'the Free Software Foundation, either version 3 of the License, or '
'(at your option) any later version. '
'\n\nThis program is distributed in the hope that it will be useful, '
'but WITHOUT ANY WARRANTY; without even the implied warranty of '
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the '
'GNU General Public License for more details. '
'\n\nYou should have received a copy of the GNU General Public License '
'along with this program. If not, see <http://www.gnu.org/licenses/>. '
, 400, wx.ClientDC(frame))
wx.adv.AboutBox(info)
#=============================================================================
# Custom events needed for updating and message parsing between the different
# modules.
class GenericModelEvent(wx.PyCommandEvent):
'''
Event class for a new model - for updating
    of the parameters, plots and script.
'''
def __init__(self,evt_type, id, model):
wx.PyCommandEvent.__init__(self, evt_type, id)
self.model = model
self.description = ''
def GetModel(self):
return self.model
def SetModel(self, model):
self.model = model
def SetDescription(self, desc):
'''
Set a string that describes the event that has occurred
'''
self.description = desc
# Generating an event type:
myEVT_NEW_MODEL = wx.NewEventType()
# Creating an event binder object
EVT_NEW_MODEL = wx.PyEventBinder(myEVT_NEW_MODEL)
def _post_new_model_event(parent, model, desc = ''):
# Send an event that a new data set has been loaded
evt = GenericModelEvent(myEVT_NEW_MODEL, parent.GetId(), model)
evt.SetDescription(desc)
# Process the event!
parent.GetEventHandler().ProcessEvent(evt)
# Generating an event type:
myEVT_SIM_PLOT = wx.NewEventType()
# Creating an event binder object
EVT_SIM_PLOT = wx.PyEventBinder(myEVT_SIM_PLOT)
def _post_sim_plot_event(parent, model, desc = ''):
    # Send an event that a new data set has been loaded
evt = GenericModelEvent(myEVT_SIM_PLOT, parent.GetId(), model)
evt.SetDescription(desc)
# Process the event!
parent.GetEventHandler().ProcessEvent(evt)
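# Usage sketch (illustrative assumption, not taken from elsewhere in this file):
# a frame that wants to react to these custom events would bind them once at
# start-up, e.g.
#   frame.Bind(EVT_NEW_MODEL, lambda evt: on_new_model(frame, evt))
#   frame.Bind(EVT_SIM_PLOT, lambda evt: on_sim_plot(frame, evt))
# where on_new_model/on_sim_plot stand for whatever handlers the application defines.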
#==============================================================================
## Functions for showing error dialogs
def ShowQuestionDialog(frame, message, title = 'Question?'):
dlg = wx.MessageDialog(frame, message,
title,
                           wx.YES_NO | wx.ICON_QUESTION
                           )
    result = dlg.ShowModal() == wx.ID_YES
dlg.Destroy()
return result
def ShowModelErrorDialog(frame, message):
dlg = wx.MessageDialog(frame, message,
'Warning',
wx.OK | wx.ICON_WARNING
)
dlg.ShowModal()
dlg.Destroy()
def ShowNotificationDialog(frame, message):
dlg = wx.MessageDialog(frame, message,
'Information',
wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
def ShowErrorDialog(frame, message, position = ''):
if position != '':
dlg = wx.MessageDialog(frame, message + '\n' + 'Position: ' + position,
'FATAL ERROR',
wx.OK | wx.ICON_ERROR
)
else:
dlg = wx.MessageDialog(frame, message,
'FATAL ERROR',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
#==============================================================================
| gpl-3.0 |
iABC2XYZ/abc | DM_Twiss/TwissTrain.py | 1 | 2204 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : [email protected]
Function:
Check that the Distribution generation method is right.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from Orth import LambdaR,OrthTrans
from TFOrth import TFLambdaR,TFOrthTrans
plt.close('all')
emitX=1.4
alphaX=-2.3
betaX=15.3
gammaX=(1.+alphaX**2)/betaX
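# Courant-Snyder (Twiss) relation used above: beta*gamma - alpha**2 = 1, i.e.
# gamma = (1 + alpha**2)/beta, so gammaX is fully determined by alphaX and betaX.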
diagRX=LambdaR(emitX,alphaX,betaX,gammaX)
PX=OrthTrans(emitX,alphaX,betaX,gammaX)
numPart=np.int32(1e1)
Z=np.random.randn(2,numPart)/1000.
X=np.matmul(np.matmul(PX,np.linalg.inv(diagRX)),Z)
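# Z is a cloud of uncorrelated Gaussian coordinates; mapping it through
# PX * inv(diagRX) is intended to imprint the requested Twiss parameters
# (emittance, alpha, beta) onto the generated phase-space coordinates X.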
'''
plt.figure(1)
plt.plot(X[0,:],X[1,:],'r.')
plt.axis('equal')
'''
##
wEmit=tf.Variable([emitX])
wAlpha=tf.Variable([alphaX])
wBeta=tf.Variable([betaX])
wGamma=tf.Variable([gammaX])
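# Note: the four tf.Variables above are immediately re-created below with the
# value 10.1, which acts as the common starting guess for the fit, so the block
# above has no effect on the optimisation.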
wEmit=tf.Variable([10.1])
wAlpha=tf.Variable([10.1])
wBeta=tf.Variable([10.1])
wGamma=tf.Variable([10.1])
xH=tf.placeholder(tf.float32,[2,None])
diagR,diagRT=TFLambdaR(wEmit,wAlpha,wBeta,wGamma)
P,PI=TFOrthTrans(wEmit,wAlpha,wBeta,wGamma)
zH=tf.matmul(tf.matmul(diagR,PI),xH)
R=zH[0]**2+zH[1]**2
#lossR=tf.abs(R-2.e-6)
lossR=R
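# R (and hence lossR) is a length-numPart vector of squared radii in the
# normalised coordinates zH, not a scalar; in TF1 the optimizer sums gradient
# contributions over the elements, so this effectively minimises sum(R).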
optR=tf.train.AdamOptimizer(0.001)
trainR=optR.minimize(lossR)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
#sess.run(diagR)
print(sess.run(R,feed_dict={xH:X}))
numIter=1
recEmit=np.zeros(numIter)
recAlpha=np.zeros(numIter)
recBeta=np.zeros(numIter)
recGamma=np.zeros(numIter)
for _ in xrange(numIter):
sess.run(trainR,feed_dict={xH:X})
recEmit[_]=sess.run(wEmit)
recAlpha[_]=sess.run(wAlpha)
recBeta[_]=sess.run(wBeta)
recGamma[_]=sess.run(wGamma)
print(recEmit)
print(recAlpha)
print(sess.run(R,feed_dict={xH:X}))
#plt.figure('emit')
#plt.plot(recEmit)
'''
zGet=sess.run(zH,feed_dict={xH:X})
print(sess.run(lossR,feed_dict={xH:X}))
'''
'''
plt.figure('Check')
plt.hold('on')
plt.plot(Z[0,:],Z[1,:],'bo')
plt.plot(zGet[0,:],zGet[1,:],'r.')
plt.axis('equal')
'''
'''
print(sess.run(wEmit))
print(sess.run(wAlpha))
print(sess.run(wBeta))
print(sess.run(wGamma))
print(sess.run(diagR))
print(sess.run(diagRT))
'''
#print(PX)
#print(sess.run(P))
#print(sess.run(zH,feed_dict={xH:X}))
| gpl-3.0 |
murali-munna/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
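# n_centres is a (100, 2) array: one blob centre per point of the 10 x 10 grid.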
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
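# With n_clusters=None the subcluster centroids are returned as-is; with an
# integer n_clusters, Birch adds a global clustering step over the subclusters
# (agglomerative clustering by default) to reduce them to that many clusters.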
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be of dimension 3 or more')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
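# For reference: X is a (10, 2) array of ones, X_sparse its COO copy, and
# y == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4], i.e. five balanced classes of two
# samples each, shared by the tests below.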
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds keep the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X and the allow_nd flag
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
BlueBrain/deap | examples/es/cma_mo.py | 10 | 4169 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import hypervolume
from deap import cma
from deap import creator
from deap import tools
# Problem size
N = 5
# ZDT1, ZDT2, DTLZ2
MIN_BOUND = numpy.zeros(N)
MAX_BOUND = numpy.ones(N)
# Kursawe
# MIN_BOUND = numpy.zeros(N) - 5
# MAX_BOUND = numpy.zeros(N) + 5
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
def distance(feasible_ind, original_ind):
"""A distance function to the feasability region."""
return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))
def closest_feasible(individual):
"""A function returning a valid individual from an invalid one."""
feasible_ind = numpy.array(individual)
feasible_ind = numpy.maximum(MIN_BOUND, feasible_ind)
feasible_ind = numpy.minimum(MAX_BOUND, feasible_ind)
return feasible_ind
def valid(individual):
"""Determines if the individual is valid or not."""
if any(individual < MIN_BOUND) or any(individual > MAX_BOUND):
return False
return True
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.decorate("evaluate", tools.ClosestValidPenality(valid, closest_feasible, 1.0e-6, distance))
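# Hedged illustration of the penalty decoration above (the numbers are made up,
# not part of the original example): an out-of-bounds candidate such as
#     ind = [-0.2, 0.3, 1.4, 0.5, 0.6]
# fails valid(), is clipped by closest_feasible(ind) to [0.0, 0.3, 1.0, 0.5, 0.6],
# benchmarks.zdt1 is evaluated on the clipped point, and a penalty proportional to
# distance(clipped, ind) = 0.2**2 + 0.4**2 = 0.2 (weighted by 1.0e-6) is folded
# into the returned objectives, gently pushing the search back into the box.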
def main():
# The cma module uses the numpy random number generator
# numpy.random.seed(128)
MU, LAMBDA = 10, 10
NGEN = 500
verbose = True
# The MO-CMA-ES algorithm takes a full population as argument
population = [creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))]
for ind in population:
ind.fitness.values = toolbox.evaluate(ind)
strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(population), **record)
if verbose:
print(logbook.stream)
if verbose:
print("Final population hypervolume is %f" % hypervolume(strategy.parents, [11.0, 11.0]))
# import matplotlib.pyplot as plt
# valid_front = numpy.array([ind.fitness.values for ind in strategy.parents if valid(ind)])
# invalid_front = numpy.array([ind.fitness.values for ind in strategy.parents if not valid(ind)])
# fig = plt.figure()
# if len(valid_front) > 0:
# plt.scatter(valid_front[:,0], valid_front[:,1], c="g")
# if len(invalid_front) > 0:
# plt.scatter(invalid_front[:,0], invalid_front[:,1], c="r")
# plt.show()
return strategy.parents
if __name__ == "__main__":
solutions = main()
| lgpl-3.0 |
jakobworldpeace/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
mikec964/chelmbigstock | tests/linear_adapter_test.py | 1 | 1969 | #
'''
May,08, 2014
@author Hideki Ikeda
Unit test for the LinearAdapter class in stock_value.py
'''
import os
import sys
from datetime import date, datetime, timedelta
import shutil
import unittest
from sklearn import linear_model
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
from chelmbigstock import stock_value
class TestLinearAdapter(unittest.TestCase):
_e = 1e-6 # tolerance
_test_LR = [ # test data for linear regression
[
[0, 1, 3],
[0, 1, 3],
[5.5], [5.5]
],
[
[1, 3, 4, 5, 9],
[2, 5, 8, 7, 10],
[5.5], [7.4375]
]
]
_test_RR = [ # test data for Ridge regression with alpha = 0.5
[
[0, 1, 3],
[0, 1, 3],
[5.5], [5.09677419]
],
[
[1, 3, 4, 5, 9],
[2, 5, 8, 7, 10],
[5.5], [7.42296919]
]
]
def equal_floats(self, l1, l2):
if len(l1) != len(l2):
return False
for i in range(0, len(l1)):
if abs((l1[i] - l2[i])/l1[i]) >= self._e:
return False
return True
def test_linear_regression(self):
clf = linear_model.LinearRegression()
predictor = stock_value.LinearAdapter(clf)
for x, y, ex, ey in self._test_LR:
predictor.fit(x, y)
self.assertTrue(self.equal_floats(ey, predictor.predict([ex])))
def test_ridge_regression(self):
clf = linear_model.Ridge(alpha = 0.5)
predictor = stock_value.LinearAdapter(clf)
for x, y, ex, ey in self._test_RR:
predictor.fit(x, y)
self.assertTrue(self.equal_floats(ey, predictor.predict([ex])))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
vonholst/deeplearning_example_kog | test_model.py | 1 | 2763 | # important!! keras==1.2.2
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras import optimizers
from lib.helpers import get_training_parameters, plot_training_history
import json
from keras.models import load_model
from keras.utils.generic_utils import CustomObjectScope
import keras
np.random.seed(123) # for reproducibility
options = get_training_parameters(rows=224, cols=224)
validation_image_path = './dev'
val_datagen = ImageDataGenerator(
rescale=options["image_scale"],
# rotation_range=5.0,
# shear_range=0.3,
# zoom_range=0.3,
)
val_generator = val_datagen.flow_from_directory(
validation_image_path,
target_size=(options["img_rows"], options["img_cols"]),
batch_size=options["image_gen_batch_size"],
class_mode='categorical',
shuffle=True)
# score = model.evaluate_generator(val_generator, 50)
# print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
def class_from_id(id, class_dictionary):
for k, v in class_dictionary.iteritems():
if v == id:
return k
return None
def show_result(im, target_class, result_class, result_confidence):
plt.imshow(im)
plt.title(u"{target}: {result_confidence}, max:({result_class})".format(target=target_class,
result_confidence=result_confidence,
result_class=result_class))
plt.show()
if __name__ == "__main__":
with open('./model/keras_model_training_history.json', 'r') as data_file:
training_history = json.load(data_file)
plot_training_history(training_history)
with open('./model/keras_model_classes.json') as data_file:
class_indices = json.load(data_file)
with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):
model = load_model("./model/keras_model.h5")
for _ in range(10):
batch = val_generator.next()
for i in range(batch[0].shape[0]):
im = batch[0][i]
target = batch[1][i]
img = im.reshape(1, options["img_rows"], options["img_cols"], 3)
result = model.predict(img)
target_id = target.argmax()
target_class = class_from_id(target_id, class_indices)
result_class = class_from_id(result.argmax(), class_indices)
result_confidence = result[0][target_id] * 100
if target_class != result_class:
show_result(im, target_class, result_class, result_confidence)
else:
print("Correct {}".format(i))
| mit |
aabadie/scikit-learn | examples/svm/plot_svm_scale_c.py | 19 | 5409 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]  # integer division keeps the shape an int
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each classifier
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size,
n_splits=250, random_state=1))
grid.fit(X, y)
scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
krez13/scikit-learn | sklearn/datasets/base.py | 22 | 22973 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
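# Hedged usage sketch (paths are illustrative only, not mandated by this module):
#     get_data_home()                       # e.g. '/home/user/scikit_learn_data'
#     get_data_home('/tmp/sklearn_cache')   # explicit folder, created if missing
#     os.environ['SCIKIT_LEARN_DATA'] = '/data/cache'  # overrides the default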
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
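# Hedged usage sketch (the 'txt_sentoken' folder name is an assumption borrowed
# from a typical text-classification layout, not something this module requires):
#     from sklearn.feature_extraction.text import CountVectorizer
#     bunch = load_files('txt_sentoken', encoding='utf-8')
#     X = CountVectorizer().fit_transform(bunch.data)
#     y = bunch.target   # integer labels; folder names live in bunch.target_names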
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer():
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
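# Hedged usage sketch; the expected shapes follow the table in the docstring above:
#     diabetes = load_diabetes()
#     diabetes.data.shape     # (442, 10)
#     diabetes.target.shape   # (442,)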
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
    An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension, so that
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
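# Hedged example of the helper above (the folder name is illustrative only):
#     _pkl_filepath('/tmp/cache', 'species.pkl')
#     # -> '/tmp/cache/species.pkl'      under Python 2
#     # -> '/tmp/cache/species_py3.pkl'  under Python 3+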
| bsd-3-clause |
seadsystem/Backend | Analysis and Classification/Analysis/Code/New and modified versions of Henry's code/Asiiah's_Code/harmonics_3d_new.py | 1 | 8061 | #!/usr/bin/python
# ============================================================
# File: harmonics_3d.py
# Description: Takes a xlsx file from the PA1000 and 3d plots
# the Voltage, Current, and Wattage harmonics
# usage: harmonics_3d.py [source.xlsx]
# Created by Henry Crute
# 9/12/2014
# Edited very slightly by Lanjing Zhang
# 11/9/2014
# ============================================================
from mpl_toolkits.mplot3d import Axes3D
import os
import datetime
import sys
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from openpyxl import load_workbook
#creates folder if it doesn't exist in the directory
def ensure_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
#uses openpyxl library to parse all of the data in voltage, amperage
#input variables. extremely ugly
def get_harmonics(filename, voltage, amperage, wattage):
wb2 = load_workbook(filename, use_iterators = True)
ws = wb2.get_sheet_by_name(name = 'PA1000 (0122) Ch1')
voltIndex = []
ampIndex = []
j = -2
#iterate through all rows
for row in ws.iter_rows():
i = -1
if j > -2:
amperage.append([])
voltage.append([])
j = j + 1
#iterate through each cell in row
for cell in row:
i = i + 1
#extract data from the index values and puts it into arrays
if i in voltIndex:
if cell.value == '':
continue
#print cell.value
voltage[j].append(cell.value)
continue
if i in ampIndex:
if cell.value == '':
continue
#print cell.value
amperage[j].append(cell.value)
continue
if j > 0 and i == 3:
wattage.append(cell.value)
#placeholder index values for the first row, tells where data is
if type(cell.value) is datetime.datetime:
continue
elif type(cell.value) is float:
continue
elif 'Volt' in cell.value and 'Magnitude' in cell.value:
#print cell.value
voltIndex.append(i)
elif 'Magnitude' in cell.value and 'Amp' in cell.value:
#print cell.value
ampIndex.append(i)
#takes average of given arrays, from start_index to stop_index
#also finds max, and min values within the range
def get_avgmaxmin(input_list, start_index, stop_index, mean_list, std_list):
if stop_index == 0:
return
#initialize array of zeros size equal to number of harmonics
mean = [0] * len(input_list[0])
for index in range(start_index, stop_index):
for number in range(len(input_list[index])):
mean[number] = mean[number] + input_list[index][number]
for number in range(len(mean)):
mean[number] = mean[number] / (stop_index - start_index)
#print mean
mean_list.append(mean)
#initialize array of zeros equal to number of harmonics for variance
variance = [0] * len(input_list[0])
for number in range(len(input_list[0])):
for index in range(start_index, stop_index):
variance[number] = variance[number] + math.pow((input_list[index][number] - mean[number]), 2)
for number in range(len(variance)):
variance[number] = variance[number] / (stop_index - start_index)
#print variance
stdeviation = [0] * len(input_list[0])
for number in range(len(variance)):
stdeviation[number] = math.sqrt(variance[number])
std_list.append(stdeviation)
#print stdeviation
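# Hedged worked example for get_avgmaxmin (toy numbers, not from a real capture):
# for input_list = [[1.0, 2.0], [3.0, 4.0]], start_index=0, stop_index=2, the
# appended mean is [2.0, 3.0], the per-harmonic variance is [1.0, 1.0], so the
# appended standard deviation is [1.0, 1.0].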
#percent difference comparison to see transients in snapshot, and trim
def trim_harmonics(voltage, amperage, volt_mean, amp_mean, volt_std, amp_std):
#for voltage
i = 0
prevSum = -1
averageStart = 0
averageStop = 0
#iterates over a copy of the list
for row in list(voltage):
curSum = sum(row)
#solves division by zero error
if ((curSum + prevSum) == 0):
per_diff = 0
else:
per_diff = abs(curSum - prevSum)/((curSum + prevSum)/2)
#print 'per_diff = ' + str(per_diff)
if abs(per_diff) < 0.10: #########################CHANGEME
#print 'deleting ' + str(voltage[i])
#voltage.remove(row)
pass
else :
get_avgmaxmin(voltage, averageStart, averageStop, volt_mean, volt_std)
averageStart = averageStop
prevSum = curSum
i = i + 1
averageStop = averageStop + 1
get_avgmaxmin(amperage, averageStart, averageStop, volt_mean, volt_std)
#for amperage
i = 0
prevSum = -1
averageStart = 0
averageStop = 0
#iterates over a copy of the list
for row in list(amperage):
curSum = sum(row)
#solves division by zero error
if ((curSum + prevSum) == 0):
per_diff = 0
else:
per_diff = abs(curSum - prevSum)/((curSum + prevSum)/2)
#print 'per_diff = ' + str(per_diff)
if abs(per_diff) < 0.10: #########################CHANGEME
#print 'deleting row ' + str(averageStop)
#amperage.remove(row)
pass
else :
get_avgmaxmin(amperage, averageStart, averageStop, amp_mean, amp_std)
averageStart = averageStop
prevSum = curSum
i = i + 1
averageStop = averageStop + 1
get_avgmaxmin(amperage, averageStart, averageStop, amp_mean, amp_std)
def visualize_data(wattage, filename):
ensure_dir(output_dir + filename)
fig = plt.figure()
plt.title('Power from ' + filename)
plt.xlabel('Seconds')
plt.ylabel('Watts')
x = np.arange(0, len(wattage) / 2.0, 0.5)
plt.plot(x, wattage, 'r',
x, wattage, 'ro')
fig.savefig(output_dir + filename + '/' + filename + '_watt' + '.png')
#creates a 3 dimensional bargraph from two dimensional list_in
#with colors specified from input colorpicker
def bargraph_3d(list_in, list_std, colorpicker, filename):
ensure_dir(output_dir + filename)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Harmonics from ' + filename)
yaxis = np.arange(len(list_in))
#print yaxis
print "colorpicker is " + str(len(colorpicker))
print "yaxis is " + str(len(yaxis))
for c, z in zip(colorpicker, yaxis):
xs = np.arange(len(list_in[z]))
#print xs
ys = list_in[z]
#print ys
cs = c
#make colors different, e.g. each
ax.bar(xs, ys, zs=z, zdir='y', color=c, alpha=0.8)
ax.set_zlim(0,1)
ax.set_xlabel('Harmonic Number')
ax.set_ylabel('Change over Time')
ax.set_zlabel('Harmonic Arms Amplitude')
fig.savefig(output_dir + filename + '/' + filename + '_harmonics.png')
plt.show()
def process_data(file_directory):
filename = os.path.basename(file_directory)
#2 dimensional list initializations
voltage = []
amperage = []
wattage = []
#grabs all of the harmonics from the xlsx
get_harmonics(file_directory, voltage, amperage, wattage)
#trims harmonics to get means, and standard deviations to graph
volt_mean = []
amp_mean = []
watt_mean = []
volt_std = []
amp_std = []
watt_std = []
trim_harmonics(voltage, amperage, volt_mean, amp_mean, volt_std, amp_std)
print amp_mean
#creates a random color list for voltage then amperage
volt_color = []
for x in range(0, len(volt_mean)):
volt_color.append((random.random(), random.random(), random.random()))
amp_color = []
for x in range(0, len(amp_mean)):
amp_color.append((random.random(), random.random(), random.random()))
#make bar graphs
visualize_data(wattage, filename)
#bargraph_3d(volt_mean, amp_std, volt_color)
bargraph_3d(amp_mean, amp_std, amp_color, filename)
#START OF PROGRAM
if len(sys.argv) < 3:
print 'usage: ' + sys.argv[0] + ' [input file] [output directory]'
exit(1)
#creates workbook from input argument
path = sys.argv[1]
#opens output directory to output files
output_dir = sys.argv[2]
for root, dirs, filenames in os.walk(path):
for f in filenames:
absolute_path = path + f
print absolute_path
process_data(absolute_path)
| mit |
spencerpomme/coconuts-on-fire | TSP algorithm application.py | 1 | 9351 | #! needed libraries
from __future__ import division
import matplotlib.pyplot as plt
import random
import time
import itertools
import urllib
import csv
# file_xy is the csv file of scenic-spot longitude/latitude coordinates,
# file_value is the csv file of scenic-spot popularity (heat) values
file_xy = open(r'J:\四会多规合一\四会景点坐标.csv')
file_value = open(r'J:\四会多规合一\四会景点热度.csv')
file_back = open(r'J:\四会多规合一\四会景点坐标back.csv')
# alternative dataset from the unmerged branch:
# file = open(r'E:\SkyDrive\近期\relics.csv')
def alltours(cities):
'''Return a list of tours, each a permutation of cities, but each one starting
with the same city.'''
start = first(cities)
return [[start] + Tour(rest) for rest in itertools.permutations(cities - {start})]
def first(collection):
'''Start iterating over collection, and return the first element.'''
return next(iter(collection))
Tour = list # Tours are implemented as lists of cities
# This Cities function need to be improved in data reading adaptablity.
def Cities(csv):
'''
    A function to drag out x and y coordinates from a csv file (n rows, 2 cols) and
convert them into complex numbers and store into a frozen set.
'''
assembly = []
# The supposed scale in certain lat and long.
long_scale = 1.0
lat_scale = 1.0
for item in csv:
a = item.rstrip()
pair = list(map(lambda x: float(x), a.split(',')))
pair[0] = pair[0] * long_scale
pair[1] = pair[1] * lat_scale
coor = complex(*pair)
assembly.append(coor)
return frozenset(assembly)
def nodes_reader(file):
'''
A function to drag out x and y coordinates from a csv file
(n row 2 col) and convert them into complex numbers and store
    into a frozen set. It's extended to work with more than 2 columns
and can omit empty rows.
'''
assembly = []
readerxy = csv.reader(file, delimiter=',', skipinitialspace=True)
    filter_num = first_n(file_value)  # Attention: values below this threshold will be omitted.
print('firstn:', filter_num)
for row in readerxy:
if row[0] != '':
latitude = float(row[1])# latitude as Y coordinate
longitude = float(row[2])# longitude as X coordinate
coor = complex(longitude, latitude)
if float(row[3]) >= filter_num:
assembly.append(coor)
print('length:', len(assembly))
return frozenset(assembly)
def all_points_plot(file):
reader_all = csv.reader(file, delimiter=',', skipinitialspace=True)
x_all = []
y_all = []
for row in reader_all:
if row[0] != '':
y_all.append(float(row[1]))# latitude as Y coordinate
x_all.append(float(row[2]))# longitude as X coordinate
return (x_all, y_all)
def first_n(file):
value_list = []
readerv = csv.reader(file, delimiter=',', skipinitialspace=True)
for row in readerv:
if row[1] != 0:
value_list.append(float(row[1]))
value_list.sort()
n = 20
#The n here represents how many points you want to include in the tour.
return value_list[(len(value_list)-1)-n]
def Cities_selected(csv):
'''
A selected set of points according to selector's rule.
'''
pass
def selector(assembly):
'''
A function to select points from the massive points generated by the function
above(Cities)
'''
pass
def shortest_tour(tours):
'''Choose the tour with the minimum tour length.'''
return min(tours, key = tour_length)
def repeated_nn_tsp(cities, repetitions=30):
'''
Repeat the nn_tsp algorithm starting from specified number of cities;
return the shortest tour.
'''
return shortest_tour(nn_tsp(cities, start) for start in sample(cities, repetitions))
def sample(population, k, seed=42):
'''
Return a list of k elements sampled from population. Set random.seed with seed.
'''
if k is None or k > len(population):
return population
random.seed(len(population) * k * seed)
return random.sample(population, k)
def nn_tsp(cities, start=None):
"""Start the tour at the first city; at each step extend the tour by moving
from the previous city to its nearest neighbor that has not yet been visited.
"""
if start is None: start = first(cities)
tour = [start]
unvisited = set(cities - {start})
while unvisited:
C = nearest_neighbor(tour[-1], unvisited)
tour.append(C)
unvisited.remove(C)
return tour
def nearest_neighbor(A, cities):
"""
    Find the city in cities (the unvisited set) that is nearest to city A
"""
return min(cities, key = lambda c: distance(c,A))
def reverse_segment_if_better(tour, i, j):
'''
If reversing tour[i:j] would make the tour shorter, then do it.
'''
# Given tour [...A-B...C-D...], consider reversing B...C to get [...A-C...B-D...]
A, B, C, D = tour[i-1], tour[i], tour[j-1], tour[j % len(tour)]
# Are old edges(AB+CD) longer than new ones(AC+BD)? If so, reverse segment.
if distance(A, B) + distance(C, D) > distance(A, C) + distance(B, D):
tour[i:j] = reversed(tour[i:j])
def alter_tour(tour):
"Try to alter tour for the better by reversing segments."
original_length = tour_length(tour)
for (start, end) in all_segments(len(tour)):
reverse_segment_if_better(tour, start, end)
# If we made an improvement, then try again; else stop and return tour.
if tour_length(tour) < original_length:
return alter_tour(tour)
return tour
def all_segments(N):
"Return (start, end) pairs of indexes that form segments of tour of length N."
return [(start, start + length)
for length in range(N, 2-1, -1)
for start in range(N - length + 1)]
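# Hedged worked example: all_segments(4) yields
# [(0, 4), (0, 3), (1, 4), (0, 2), (1, 3), (2, 4)],
# i.e. every contiguous segment of length >= 2, longest first.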
# The algorithm below is the best so far(2015-4-5)
def altered_nn_tsp(cities):
"Run nearest neighbor TSP algorithm, and alter the results by reversing segments."
return alter_tour(nn_tsp(cities))
def repeated_altered_nn_tsp(cities, repetitions=20):
"Use alteration to improve each repetition of nearest neighbors."
return shortest_tour(alter_tour(nn_tsp(cities, start))
for start in sample(cities, repetitions))
def tour_length(tour):
'''
The total of distance between each pair of consecutive cities in the tour
'''
return sum(distance(tour[i], tour[i-1]) for i in range(len(tour)))
def X(point):
'''
The x coordinate of a point.'''
return point.real
def Y(point):
'''
The y coordinate of a point.'''
return point.imag
def distance(A, B):
'''
The distance between two points.'''
return abs(A-B)
def plot_all_points(allpoints, style='go'):
    '''
    Plot all points which are potentially visitable.
    Also serves as a reference layer to make the route meaningful and easy to see.
    '''
    plt.plot(list(map(X, allpoints)), list(map(Y, allpoints)), style)
    plt.show()
def plot_tour(tour):
'''
Plot the cities as circles and the tour as lines between them.'''
start = tour[0]
plot_lines(list(tour) + [tour[0]])
plot_lines([start], 'rs') # mark the start city with a red square
def plot_lines(points, style='bo-'):
'''
Plot lines to connect a series of points.
'''
    # The lines below plot the background points, i.e. points that are
    # not necessarily included in the tour.
    back = all_points_plot(file_back)
    plt.plot(back[0], back[1], 'go')
plt.plot(list(map(X, points)), list(map(Y, points)), style)
    plt.axis('scaled')
    plt.axis('off')
def plot_tsp(algorithm, cities):
'''Apply a TSP algorithm to cities, plot the resulting tour, and print
information.'''
# Find the solution and time how long it takes
t0 = time.clock()
tour = algorithm(cities)
t1 = time.clock()
assert valid_tour(tour, cities)
plot_tour(tour)
plt.show()
print('{} city tour with length {:.1f} in {:.3f} secs for {}'.format(\
len(tour), tour_length(tour), t1-t0, algorithm.__name__))
def valid_tour(tour, cities):
    '''Is tour a valid tour for these cities?'''
return set(tour) == set(cities) and len(tour) == len(cities)
def length_ratio(cities):
    '''The ratio of the tour lengths for the nn_tsp and repeated_nn_tsp algorithms.'''
return tour_length(nn_tsp(cities)) / tour_length(repeated_nn_tsp(cities))
res = nodes_reader(file_xy)
rep = repeated_nn_tsp(res)
alt = altered_nn_tsp(res)
rpalt = repeated_altered_nn_tsp(res, repetitions=300)
# print(rpalt)
print(len(rpalt))
plot_tsp(repeated_altered_nn_tsp, res)
plt.show()
a = all_points_plot(file_xy)
print(a)
| apache-2.0 |
vigilv/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
darionyaphet/spark | python/pyspark/sql/tests/test_pandas_grouped_map.py | 6 | 24514 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import sys
from collections import OrderedDict
from decimal import Decimal
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType, \
window
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa
# Tests below use pd.DataFrame.assign that will infer mixed types (unicode/str) for column names
# from kwargs w/ Python 2, so need to set check_column_type=False and avoid this check
_check_column_type = sys.version >= '3'
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
values = [
1, 2, 3,
4, 5, 1.1,
2.2, Decimal(1.123),
[1, 2, 2], True, 'hello',
bytearray([0x01, 0x02])
]
output_fields = [
('id', IntegerType()), ('byte', ByteType()), ('short', ShortType()),
('int', IntegerType()), ('long', LongType()), ('float', FloatType()),
('double', DoubleType()), ('decim', DecimalType(10, 3)),
('array', ArrayType(IntegerType())), ('bool', BooleanType()), ('str', StringType()),
('bin', BinaryType())
]
output_schema = StructType([StructField(*x) for x in output_fields])
df = self.spark.createDataFrame([values], schema=output_schema)
        # Different forms of grouped map pandas UDF; the results of these should be the same
udf1 = pandas_udf(
lambda pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(
id=key[0],
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
def test_array_type_correct(self):
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_register_grouped_map_udf(self):
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ValueError,
'f.*SQL_BATCHED_UDF.*SQL_SCALAR_PANDAS_UDF.*SQL_GROUPED_AGG_PANDAS_UDF.*'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_coerce(self):
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_complex_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0, as_index=False).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_empty_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_datatype_string(self):
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
common_err_msg = 'Invalid return type.*grouped map Pandas UDF.*'
unsupported_types = [
StructField('map', MapType(StringType(), IntegerType())),
StructField('arr_ts', ArrayType(TimestampType())),
StructField('null', NullType()),
StructField('struct', StructType([StructField('l', LongType())])),
]
for unsupported_type in unsupported_types:
schema = StructType([StructField('id', LongType(), True), unsupported_type])
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, common_err_msg):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
assert_frame_equal(df.toPandas(), result.toPandas(), check_column_type=_check_column_type)
def test_udf_with_key(self):
import numpy as np
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
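        # Illustrative sketch of those promotion rules (exact behaviour can vary
        # with the NumPy/pandas versions this test suite runs against):
        #   np.int64(2) * pd.Series([1], dtype='int32')                    -> int32
        #   pd.Series([2], dtype='int64') * pd.Series([1], dtype='int32')  -> int64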
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id', as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2, as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2], as_index=False)\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
assert_frame_equal(expected4, result4, check_column_type=_check_column_type)
def test_column_order(self):
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id', as_index=False)
# Function returns a pdf with required column names, but order could be arbitrary using dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use OrderedDict to ensure the pdf column order is different than schema
return pd.DataFrame.from_dict(OrderedDict([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)]))
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "an integer is required"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
with self.sql_conf({
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName": False}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
@pandas_udf('key long, col string', PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
col('temp0.key') == col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, x=5), Row(id=1, x=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda pdf: pdf.assign(x=pdf['x'].sum()),
'id long, x int', PandasUDFType.GROUPED_MAP)
result = df.groupBy('id').apply(f).collect()
self.assertEqual(result, expected)
def test_grouped_over_window(self):
data = [(0, 1, "2018-03-10T00:00:00+00:00", [0]),
(1, 2, "2018-03-11T00:00:00+00:00", [0]),
(2, 2, "2018-03-12T00:00:00+00:00", [0]),
(3, 3, "2018-03-15T00:00:00+00:00", [0]),
(4, 3, "2018-03-16T00:00:00+00:00", [0]),
(5, 3, "2018-03-17T00:00:00+00:00", [0]),
(6, 3, "2018-03-21T00:00:00+00:00", [0])]
expected = {0: [0],
1: [1, 2],
2: [1, 2],
3: [3, 4, 5],
4: [3, 4, 5],
5: [3, 4, 5],
6: [6]}
df = self.spark.createDataFrame(data, ['id', 'group', 'ts', 'result'])
df = df.select(col('id'), col('group'), col('ts').cast('timestamp'), col('result'))
def f(pdf):
# Assign each result element the ids of the windowed group
pdf['result'] = [pdf['id']] * len(pdf)
return pdf
result = df.groupby('group', window('ts', '5 days')).applyInPandas(f, df.schema)\
.select('id', 'result').collect()
for r in result:
self.assertListEqual(expected[r[0]], r[1])
def test_grouped_over_window_with_key(self):
data = [(0, 1, "2018-03-10T00:00:00+00:00", False),
(1, 2, "2018-03-11T00:00:00+00:00", False),
(2, 2, "2018-03-12T00:00:00+00:00", False),
(3, 3, "2018-03-15T00:00:00+00:00", False),
(4, 3, "2018-03-16T00:00:00+00:00", False),
(5, 3, "2018-03-17T00:00:00+00:00", False),
(6, 3, "2018-03-21T00:00:00+00:00", False)]
expected_window = [
{'start': datetime.datetime(2018, 3, 10, 0, 0),
'end': datetime.datetime(2018, 3, 15, 0, 0)},
{'start': datetime.datetime(2018, 3, 15, 0, 0),
'end': datetime.datetime(2018, 3, 20, 0, 0)},
{'start': datetime.datetime(2018, 3, 20, 0, 0),
'end': datetime.datetime(2018, 3, 25, 0, 0)},
]
expected = {0: (1, expected_window[0]),
1: (2, expected_window[0]),
2: (2, expected_window[0]),
3: (3, expected_window[1]),
4: (3, expected_window[1]),
5: (3, expected_window[1]),
6: (3, expected_window[2])}
df = self.spark.createDataFrame(data, ['id', 'group', 'ts', 'result'])
df = df.select(col('id'), col('group'), col('ts').cast('timestamp'), col('result'))
@pandas_udf(df.schema, PandasUDFType.GROUPED_MAP)
def f(key, pdf):
group = key[0]
window_range = key[1]
# Result will be True if group and window range equal to expected
is_expected = pdf.id.apply(lambda id: (expected[id][0] == group and
expected[id][1] == window_range))
return pdf.assign(result=is_expected)
result = df.groupby('group', window('ts', '5 days')).apply(f).select('result').collect()
# Check that all group and window_range values from udf matched expected
self.assertTrue(all([r[0] for r in result]))
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_grouped_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
andersbll/deeppy | examples/adversarial_mnist.py | 1 | 4004 | #!/usr/bin/env python
"""
Digit generation using generative adversarial nets
==================================================
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import deeppy as dp
import deeppy.expr as expr
# Fetch dataset
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.arrays(dp_dtypes=True, flat=True)
n_classes = dataset.n_classes
img_shape = dataset.img_shape
# Normalize pixel intensities
scaler = dp.UniformScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = np.reshape(x_train, (x_train.shape[0], -1))
x_test = np.reshape(x_test, (x_test.shape[0], -1))
# Setup network
def affine(n_out):
return expr.nnet.Affine(n_out=n_out, weights=dp.AutoFiller(gain=1.25))
n_in = x_train.shape[1]
n_discriminator = 1024
n_hidden = 64
n_generator = 1024
generator = expr.Sequential([
affine(n_generator),
expr.nnet.BatchNormalization(),
expr.nnet.ReLU(),
affine(n_generator),
expr.nnet.BatchNormalization(),
expr.nnet.ReLU(),
affine(n_in),
expr.nnet.Sigmoid(),
])
discriminator = expr.Sequential([
expr.nnet.Dropout(0.5),
affine(n_discriminator),
expr.nnet.ReLU(),
expr.nnet.Dropout(0.5),
affine(n_discriminator),
expr.nnet.ReLU(),
affine(1),
expr.nnet.Sigmoid(),
])
model = dp.model.AdversarialNet(generator, discriminator, n_hidden=512)
# Prepare network feeds
batch_size = 64
train_feed = dp.Feed(x_train, batch_size=batch_size)
# Samples to be plotted during training
n_examples = 100
samples = np.random.normal(size=(n_examples, model.n_hidden)).astype(dp.float_)
plot_epochs = [0, 4, 14]
plot_imgs = [(x_train[:n_examples], 'Dataset examples')]
# Train network
n_epochs = 15
margin = 0.25
equilibrium = 0.6931
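# 0.6931 is approximately ln(2): the cross-entropy cost when the discriminator
# outputs 0.5, i.e. the value both costs hover around when G and D are balanced.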
learn_rate = 0.075
learn_rule_g = dp.RMSProp(learn_rate=learn_rate)
learn_rule_d = dp.RMSProp(learn_rate=learn_rate)
model.setup(*train_feed.shapes)
g_params, d_params = model.params
learn_rule_g.learn_rate /= batch_size
learn_rule_d.learn_rate /= batch_size*2
g_states = [learn_rule_g.init_state(p) for p in g_params]
d_states = [learn_rule_d.init_state(p) for p in d_params]
for epoch in range(n_epochs):
batch_costs = []
for x, in train_feed.batches():
real_cost, gen_cost = model.update(x)
batch_costs.append((real_cost, gen_cost))
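        # Balancing heuristic used below: if either cost drops well below ln(2)
        # the discriminator is winning, so its update is skipped; if either cost
        # rises well above ln(2) the generator update is skipped instead.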
update_g = True
update_d = True
if real_cost < equilibrium - margin or gen_cost < equilibrium - margin:
update_d = False
if real_cost > equilibrium + margin or gen_cost > equilibrium + margin:
update_g = False
if not (update_g or update_d):
update_g = True
update_d = True
if update_g:
for param, state in zip(g_params, g_states):
learn_rule_g.step(param, state)
if update_d:
for param, state in zip(d_params, d_states):
learn_rule_d.step(param, state)
real_cost = np.mean([cost[0] for cost in batch_costs])
gen_cost = np.mean([cost[1] for cost in batch_costs])
print('epoch %d real_cost:%.4f gen_cost:%.4f' % (epoch, real_cost,
gen_cost))
if epoch in plot_epochs:
samples_img = model.generate(samples)
plot_imgs.append((samples_img, 'Samples after epoch %i' % (epoch + 1)))
model.setup(train_feed.x_shape)
model.phase = 'train'
# Plot
fig = plt.figure()
fig_gs = matplotlib.gridspec.GridSpec(2, 2)
for i, (imgs, title) in enumerate(plot_imgs):
imgs = np.reshape(imgs, (-1,) + img_shape)
imgs = dp.misc.to_b01c(imgs)
img_tile = dp.misc.img_tile(dp.misc.img_stretch(imgs))
ax = plt.subplot(fig_gs[i // 2, i % 2])
ax.imshow(img_tile, interpolation='nearest', cmap='gray')
ax.set_title(title)
ax.axis('off')
plt.tight_layout()
| mit |
probml/pyprobml | scripts/ekf_continuous_demo.py | 1 | 2061 | # Example of an Extended Kalman Filter using
# a figure-8 nonlinear dynamical system.
# For further reference and examples see:
# * Section on EKFs in PML vol2 book
# * https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/11-Extended-Kalman-Filters.ipynb
# * Nonlinear Dynamics and Chaos - Steven Strogatz
# Author: Gerardo Durán-Martín (@gerdm)
import nlds_lib as ds
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import jax.numpy as jnp
import numpy as np
from jax import random
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def fz(x):
x, y = x
return jnp.asarray([y, x - x ** 3])
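# fz returns the state derivative [dx/dt, dy/dt] = [y, x - x**3], i.e. the
# undamped double-well (Duffing-type) oscillator whose separatrix traces the
# figure-8 shape referred to in the header comment.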
def fx(x):
x, y = x
return jnp.asarray([x, y])
dt = 0.01
T = 7.5
nsamples = 70
x0 = jnp.array([0.5, -0.75])
# State noise
Qt = jnp.eye(2) * 0.001
# Observed noise
Rt = jnp.eye(2) * 0.01
key = random.PRNGKey(314)
ekf = ds.ContinuousExtendedKalmanFilter(fz, fx, Qt, Rt)
sample_state, sample_obs, jump = ekf.sample(key, x0, T, nsamples)
mu_hist, V_hist = ekf.estimate(sample_state, sample_obs, jump, dt)
vmin, vmax, step = -1.5, 1.5 + 0.5, 0.5
X = np.mgrid[-1:1.5:step, vmin:vmax:step][::-1]
X_dot = jnp.apply_along_axis(fz, 0, X)
fig, ax = plt.subplots()
ax.plot(*sample_state.T, label="state space")
ax.scatter(*sample_obs.T, marker="+", c="tab:green", s=60, label="observations")
field = ax.streamplot(*X, *X_dot, density=1.1, color="#ccccccaa")
ax.legend()
plt.axis("equal")
ax.set_title("State Space")
pml.savefig("ekf-state-space.pdf")
fig, ax = plt.subplots()
ax.plot(*sample_state.T, c="tab:orange", label="EKF estimation")
ax.scatter(*sample_obs.T, marker="+", s=60, c="tab:green", label="observations")
ax.scatter(*mu_hist[0], c="black", zorder=3)
for mut, Vt in zip(mu_hist[::4], V_hist[::4]):
pml.plot_ellipse(Vt, mut, ax, plot_center=False, alpha=0.9, zorder=3)
plt.legend()
field = ax.streamplot(*X, *X_dot, density=1.1, color="#ccccccaa")
ax.legend()
plt.axis("equal")
ax.set_title("Approximate Space")
pml.savefig("ekf-estimated-space.pdf")
plt.show()
| mit |
yuginboy/from_GULP_to_FEFF | feff/libs/test.py | 1 | 1751 | from joblib import Parallel, delayed
import time
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import time
def f(name):
print ('hello', name)
time.sleep(2)
print ('hello', name)
class A():
_idx = 0
# def __init__(self):
# t = 0
def get_idx(self):
return A._idx
def set_idx(self, i):
A._idx = i
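    # Note: _idx is a class attribute shared by all instances, so the demo at the
    # bottom of this file sees b.get_idx() change after a.set_idx(5) is called.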
# def runInParallel(*fns):
# proc = []
# for fn in fns:
# p = Process(target=fn)
# p.start()
# proc.append(p)
# for p in proc:
# p.join()
if __name__ == '__main__':
# # p = Process(target=f, args=('bob',))
# # p.start()
# # p.join()
# num_cores = multiprocessing.cpu_count()
# # for i in range(3):
# # print(f(i))
# ((i) for i in range(3))
# print( Parallel(n_jobs=3)(delayed(f)(i) for i in range(3)) )
# # runInParallel(f('1'), f('2'),f('3'))
#
#
# plt.switch_backend('QT4Agg') #default on my system
# print('Backend: {}'.format(plt.get_backend()))
#
# fig = plt.figure()
# ax = fig.add_axes([0,0, 1,1])
# ax.axis([0,10, 0,10])
# ax.plot(5, 5, 'ro')
#
# mng = plt._pylab_helpers.Gcf.figs.get(fig.number, None)
#
# mng.window.showMaximized() #maximize the figure
# time.sleep(3)
# mng.window.showMinimized() #minimize the figure
# time.sleep(3)
# mng.window.showNormal() #normal figure
# time.sleep(3)
# mng.window.hide() #hide the figure
# time.sleep(3)
# fig.show() #show the previously hidden figure
#
# ax.plot(6,6, 'bo') #just to check that everything is ok
# plt.show()
a = A()
b = A()
print(a.get_idx())
print(b.get_idx())
a.set_idx(5)
print(a.get_idx())
print(b.get_idx())
| gpl-3.0 |
dougalsutherland/skl-groups | skl_groups/features.py | 1 | 14812 | from __future__ import division, print_function
from copy import deepcopy
import warnings
import numpy as np
from sklearn.externals.six import iteritems, string_types
from sklearn.externals.six.moves import xrange
from .utils import as_integer_type
class Features(object):
'''
A wrapper class for storing bags of features. (A *bag* is a set of feature
vectors corresponding to a single "object.")
Supports storing data in two major ways:
- As a list of pointers to a numpy array per bag. This is the default,
because it usually doesn't require copying all of your data. Note that
the sub-arrays are not enforced to be row-major or even contiguous.
- As a single big row-major array. This lets you do certain things more
easily (e.g. run PCA).
The main usage API is the same for both versions; you can distinguish them
with the `stacked` property, and convert from pointers to stacked with the
`make_stacked()` method.
Supports the following operations:
* ``len(features)`` gives the number of bags.
* ``for bag in features:`` loops over bags.
* ``features[4]`` gives the fifth bag.
* ``features[[4, 8, 7]]`` makes a new Features object with only the \
passed indices, preserving metadata.
* ``feats1 == feats2`` checks that all of the features `and metadata` are \
the same.
* ``feats1 + feats2`` concatenates the two Features objects. Metadata is \
preserved if both features have that key, thrown out if not.
Parameters
----------
bags : list of numpy arrays, single array, or Features object
The feature data. If a list of numpy arrays, should be one array per
bag, each of shape [n_pts, dim], where dim is consistent between bags
but n_pts need not be (though it cannot ever be 0). If a single numpy
array, it should be of shape [sum(n_pts), dim] and contain the features
from the first bag, then the next bag, .... In this case you must also
pass n_pts. If a Features object, "copies" it (but only actually copies
any data if ``copy=True``).
n_pts : array-like of positive integers, only if bags is a single array
If bags is passed as a single array, a list of positive integers
defining the size of each bag.
stack : boolean, optional, default False
If true, stack the features. Otherwise, only stack them if a stacked
array of features is passed in.
copy : boolean, optional, default False
If true, always make a copy of the data (so that direct modifications)
don't modify the original arrays. If false, make a copy only if
necessary (i.e. stack=True for an unstacked argument).
bare : boolean, optional, default False
If true, and ``bags`` is a Features instance, don't include its
metadata.
any other keyword argument : array-like with first dimension num_bags
Metadata for each bag. Just stored along with the features, nothing
in particular done with it. (If ``bags`` is a Features instance, its
metadata is merged with any keyword arguments, with keywords taking
precedence.)
Attributes
----------
features : list of arrays of shape ``[n_pts[i], dim]``
A list of the contained features.
If ``stacked``, each array is a slice of ``stacked_features``.
stacked : boolean
Whether the features are stacked.
stacked_features : array of shape ``[sum(n_pts), dim]``
All of the features, concatenated together. Only present if ``stacked``.
n_pts : integer array of shape ``[len(self)]``
The number of points in each bag.
meta : dictionary mapping strings to arrays of shape ``[len(self)]``
The stored metadata. ``meta['foo']`` is also accessible as ``self.foo``.
'''
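    # A minimal usage sketch (the arrays and the `label` metadata are made-up
    # example data, not part of the class API):
    #     bags = [np.random.rand(10, 3), np.random.rand(7, 3)]
    #     feats = Features(bags, label=np.array([0, 1]))
    #     feats.make_stacked()   # feats.stacked_features now has shape (17, 3)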
def __init__(self, bags, n_pts=None, stack=False, copy=False, bare=False,
**meta):
if isinstance(bags, Features):
if n_pts is not None:
raise TypeError("can't pass n_pts if copying a Features object")
oth = bags
if oth.stacked:
bags = oth.stacked_features
n_pts = oth.n_pts
else:
bags = oth.features
n_pts = None
if not bare:
for k, v in iteritems(oth.meta):
meta.setdefault(k, v)
if isinstance(bags, np.ndarray) and bags.ndim == 2:
if n_pts is None:
raise TypeError("must pass n_pts if passing stacked array of "
"features")
n_pts = np.asarray(n_pts)
if n_pts.ndim != 1:
raise TypeError("n_pts must be 1-dimensional")
if n_pts.size == 0:
raise TypeError("must have at least one bag")
if np.any(n_pts <= 0):
raise TypeError("n_pts must all be positive")
try:
n_pts = as_integer_type(n_pts)
except ValueError:
raise TypeError("n_pts must be an array of integers.")
bags = np.array(bags, order='C', copy=copy)
if bags.ndim != 2 or bags.shape[0] != np.sum(n_pts):
raise TypeError("bags must have shape sum(n_pts) x dim")
if bags.shape[1] == 0:
raise TypeError("bags must have dimension > 0")
dim = bags.shape[1]
self.stacked = True
self.n_pts = n_pts
self.stacked_features = bags
self._boundaries = bounds = np.r_[0, np.cumsum(n_pts)]
self.features = np.empty(len(n_pts), object)
self.features[:] = [bags[bounds[i-1]:bounds[i]]
for i in xrange(1, len(bounds))]
else:
if n_pts is not None:
raise TypeError("n_pts should only be passed if bags is a "
"single stacked array")
dim = None
dtype = None
new_bags = np.empty(len(bags), dtype=object)
n_pts = np.empty(len(bags), dtype=int)
for i, bag in enumerate(bags):
a = np.array(bag, copy=copy)
if a.ndim == 1:
a = a[None, :]
if a.ndim != 2:
raise TypeError("bag {} not two-dimensional".format(i))
if dim is None:
dim = a.shape[1]
elif a.shape[1] != dim:
msg = "bags' second dimension must be consistent: " \
"{} is {}, expected {}"
raise TypeError(msg.format(i, a.shape[1], dim))
if dtype is None:
dtype = a.dtype
if dtype.kind not in 'fiu':
msg = "can't handle features of type {}"
raise TypeError(msg.format(a.dtype.name))
elif a.dtype != dtype:
msg = "bags' dtype is inconsistent: {} is {}, expected {}"
raise TypeError(msg.format(i, a.dtype.name, dtype.name))
if a.shape[0] == 0:
raise TypeError("bag {} has no points".format(i))
new_bags[i] = a
n_pts[i] = a.shape[0]
self.stacked = False
self.n_pts = n_pts
self.features = new_bags
try:
del self._boundaries
except AttributeError:
pass
try:
del self.stacked_features
except AttributeError:
pass
if stack:
self.make_stacked()
# handle metadata
self.meta = {}
for name, val in iteritems(meta):
if len(val) != len(n_pts):
msg = "Have {} bags but {} values for {}"
raise ValueError(msg.format(len(n_pts), len(val), name))
val = np.array(val, copy=copy)
self.meta[name] = val
if hasattr(self, name):
msg = "Features already has an attribute named '{}'; won't " \
"be accessible as an attribute"
warnings.warn(msg.format(name))
else:
setattr(self, name, val)
def make_stacked(self):
"If unstacked, convert to stacked. If stacked, do nothing."
if self.stacked:
return
self._boundaries = bounds = np.r_[0, np.cumsum(self.n_pts)]
self.stacked_features = stacked = np.vstack(self.features)
self.features = np.array(
[stacked[bounds[i-1]:bounds[i]] for i in xrange(1, len(bounds))],
dtype=object)
self.stacked = True
############################################################################
## Properties to get at basic metadata
@property
def total_points(self):
"The total number of points in all bags."
return self.n_pts.sum()
@property
def dim(self):
"The dimensionality of the features."
return self.features[0].shape[1]
@property
def dtype(self):
"The data type of the feature vectors."
return self.features[0].dtype
############################################################################
## Copying / pickling utilities
def copy(self, stack=False, copy_meta=False, memo=None):
'''
Copies the Feature object. Makes a copy of the features array.
Parameters
----------
stack : boolean, optional, default False
Whether to stack the copy if this one is unstacked.
copy_meta : boolean, optional, default False
Also copy the metadata. If False, metadata in both points to the
same object.
'''
if self.stacked:
fs = deepcopy(self.stacked_features, memo)
n_pts = self.n_pts.copy()
elif stack:
fs = np.vstack(self.features)
n_pts = self.n_pts.copy()
else:
fs = deepcopy(self.features, memo)
n_pts = None
meta = deepcopy(self.meta, memo) if copy_meta else self.meta
return Features(fs, n_pts, copy=False, **meta)
def __copy__(self):
return self.copy(stack=False, copy_meta=False)
def __deepcopy__(self, memo=None):
return self.copy(stack=False, copy_meta=True, memo=memo)
def __getstate__(self):
if self.stacked:
return (self.stacked_features, self.n_pts, self.meta)
else:
return (self.features, None, self.meta)
def __setstate__(self, state):
feats, n_pts, meta = state
self.__init__(feats, n_pts, **meta)
############################################################################
## General magic methods for basic behavior
__hash__ = None
def __eq__(self, oth):
if self is oth:
return True
elif isinstance(oth, Features):
return (len(self) == len(oth) and
set(self.meta) == set(oth.meta) and
all(np.all(self_b == oth_b)
for self_b, oth_b in zip(self, oth)) and
all(np.all(self.meta[k] == oth.meta[k])
for k in self.meta))
elif self.meta:
return False
else:
return (len(self) == len(oth) and
all(np.all(self_b == oth_b)
for self_b, oth_b in zip(self, oth)))
def __ne__(self, oth):
return not (self == oth)
def __repr__(self):
s = '<Features: {:,} bags with {} {}-dimensional points ({:,} total)>'
min_p = self.n_pts.min()
max_p = self.n_pts.max()
if min_p == max_p:
pts = "{:,}".format(min_p)
else:
pts = '{:,} to {:,}'.format(min_p, max_p)
return s.format(len(self), pts, self.dim, self.total_points)
def __len__(self):
return self.n_pts.size
def __iter__(self):
return iter(self.features)
def __getitem__(self, key):
if (isinstance(key, string_types) or
(isinstance(key, (tuple, list)) and
any(isinstance(x, string_types) for x in key))):
msg = "Features indexing only subsets rows, but got {!r}"
raise TypeError(msg.format(key))
if np.isscalar(key):
return self.features[key]
else:
return type(self)(self.features[key], copy=False, stack=False,
**{k: v[key] for k, v in iteritems(self.meta)})
def __add__(self, oth):
if isinstance(oth, Features):
meta = {k: np.r_[self.meta[k], oth.meta[k]]
for k in self.meta if k in oth.meta}
oth_features = oth.features
elif isinstance(oth, list):
meta = {}
oth_features = np.empty(len(oth), object)
oth_features[:] = oth
else:
return NotImplemented
return Features(np.r_[self.features, oth_features],
stack=False, copy=True, **meta)
def __radd__(self, oth):
if isinstance(oth, list):
oth_features = np.empty(len(oth), object)
oth_features[:] = oth
else:
return NotImplemented
return Features(np.r_[oth_features, self.features],
stack=False, copy=True)
############################################################################
## Others
def bare(self):
"Make a Features object with no metadata; points to the same features."
if not self.meta:
return self
elif self.stacked:
return Features(self.stacked_features, self.n_pts, copy=False)
else:
return Features(self.features, copy=False)
def as_features(X, stack=False, bare=False):
'''
Returns a version of X as a :class:`Features` object.
Parameters
----------
stack : boolean, default False
Make a stacked version of X. Note that if X is a features object,
this will stack it in-place, since that's usually what you want.
(If not, just use the :class:`Features` constructor instead.)
bare : boolean, default False
Return a bare version of X (no metadata).
Returns
-------
feats : :class:`Features`
A version of X. If X is already a :class:`Features` object, the original
X may be returned, depending on the arguments.
'''
if isinstance(X, Features):
if stack:
X.make_stacked()
return X.bare() if bare else X
return Features(X, stack=stack, bare=bare)
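# Usage sketch (with made-up data): as_features([np.random.rand(5, 2)], stack=True)
# returns a stacked Features object wrapping the single 5x2 bag.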
| bsd-3-clause |
johnson1228/pymatgen | pymatgen/io/abinit/abitimer.py | 5 | 29938 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files
It also provides tools to analye and to visualize the parallel efficiency.
"""
from __future__ import unicode_literals, division
import sys
import os
import collections
import numpy as np
from six.moves import zip
from monty.string import is_string, list_strings
from pymatgen.util.num import minloc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
for tup in zip(*iterables):
items.extend([item for item in tup])
return items
class AbinitTimerParserError(Exception):
"""Errors raised by AbinitTimerParser"""
class AbinitTimerParser(collections.Iterable):
"""
Responsible for parsing a list of output files, extracting the timing results
and analyzing the results.
Assume the Abinit output files have been produced with `timopt -1`.
Example:
parser = AbinitTimerParser()
parser.parse(list_of_files)
To analyze all *.abo files withing top, use:
parser, paths, okfiles = AbinitTimerParser.walk(top=".", ext=".abo")
"""
# The markers enclosing the data.
BEGIN_TAG = "-<BEGIN_TIMER"
END_TAG = "-<END_TIMER>"
Error = AbinitTimerParserError
#DEFAULT_MPI_RANK = "0"
@classmethod
def walk(cls, top=".", ext=".abo"):
"""
Scan directory tree starting from top, look for files with extension `ext` and
parse timing data.
Return: (parser, paths, okfiles)
where `parser` is the new object, `paths` is the list of files found and `okfiles`
is the list of files that have been parsed successfully.
(okfiles == paths) if all files have been parsed.
"""
paths = []
for root, dirs, files in os.walk(top):
for f in files:
if f.endswith(ext):
paths.append(os.path.join(root, f))
parser = cls()
okfiles = parser.parse(paths)
return parser, paths, okfiles
def __init__(self):
# List of files that have been parsed.
self._filenames = []
# timers[filename][mpi_rank]
# contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
self._timers = collections.OrderedDict()
def __iter__(self):
return self._timers.__iter__()
def __len__(self):
return len(self._timers)
@property
def filenames(self):
"""List of files that have been parsed successfully."""
return self._filenames
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
Files that cannot be opened are ignored. A single filename may also be given.
Return: list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname)
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok
def _read(self, fh, fname):
"""Parse the TIMER section"""
if fname in self._timers:
raise self.Error("Cannot overwrite timer associated to: %s " % fname)
def parse_line(line):
"""Parse single line."""
name, vals = line[:25], line[25:].split()
try:
ctime, cfract, wtime, wfract, ncalls, gflops = vals
except ValueError:
# v8.3 Added two columns at the end [Speedup, Efficacity]
ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals
return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)
data = {}
inside, has_timer = 0, False
for line in fh:
#print(line.strip())
if line.startswith(self.BEGIN_TAG):
has_timer = True
sections = []
info = {}
inside = 1
line = line[len(self.BEGIN_TAG):].strip()[:-1]
info["fname"] = fname
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
info[key] = val
elif line.startswith(self.END_TAG):
inside = 0
timer = AbinitTimer(sections, info, cpu_time, wall_time)
mpi_rank = info["mpi_rank"]
data[mpi_rank] = timer
elif inside:
inside += 1
line = line[1:].strip()
if inside == 2:
d = dict()
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
d[key] = float(val)
cpu_time, wall_time = d["cpu_time"], d["wall_time"]
elif inside > 5:
sections.append(parse_line(line))
else:
                    try:
                        parse_line(line)
                        parser_failed = False
                    except Exception:
                        parser_failed = True
                    if not parser_failed:
                        raise self.Error("line should be empty: " + str(inside) + line)
if not has_timer:
raise self.Error("%s: No timer section found" % fname)
# Add it to the dict
self._timers[fname] = data
def timers(self, filename=None, mpi_rank="0"):
"""
Return the list of timers associated to the given `filename` and MPI rank mpi_rank.
"""
if filename is not None:
return [self._timers[filename][mpi_rank]]
else:
return [self._timers[filename][mpi_rank] for filename in self._filenames]
def section_names(self, ordkey="wall_time"):
"""
Return the names of sections ordered by ordkey.
For the time being, the values are taken from the first timer.
"""
section_names = []
# FIXME this is not trivial
for idx, timer in enumerate(self.timers()):
if idx == 0:
section_names = [s.name for s in timer.order_sections(ordkey)]
#check = section_names
#else:
# new_set = set( [s.name for s in timer.order_sections(ordkey)])
# section_names.intersection_update(new_set)
# check = check.union(new_set)
#if check != section_names:
# print("sections", section_names)
# print("check",check)
return section_names
def get_sections(self, section_name):
"""
Return the list of sections stored in self.timers() given `section_name`
A fake section is returned if the timer does not have section_name.
"""
sections = []
for timer in self.timers():
for sect in timer.sections:
if sect.name == section_name:
sections.append(sect)
break
else:
sections.append(AbinitTimerSection.fake())
return sections
def pefficiency(self):
"""
Analyze the parallel efficiency.
Return:
:class:`ParallelEfficiency` object.
"""
timers = self.timers()
# Number of CPUs employed in each calculation.
ncpus = [timer.ncpus for timer in timers]
# Find the minimum number of cpus used and its index in timers.
min_idx = minloc(ncpus)
min_ncpus = ncpus[min_idx]
# Reference timer
ref_t = timers[min_idx]
# Compute the parallel efficiency (total and section efficiency)
peff = {}
        ctime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
n = len(timers)
peff["total"] = {}
peff["total"]["cpu_time"] = ctime_peff
peff["total"]["wall_time"] = wtime_peff
peff["total"]["cpu_fract"] = n * [100]
peff["total"]["wall_fract"] = n * [100]
for sect_name in self.section_names():
#print(sect_name)
ref_sect = ref_t.get_section(sect_name)
sects = [t.get_section(sect_name) for t in timers]
try:
ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
except ZeroDivisionError:
ctime_peff = n * [-1]
wtime_peff = n * [-1]
assert sect_name not in peff
peff[sect_name] = {}
peff[sect_name]["cpu_time"] = ctime_peff
peff[sect_name]["wall_time"] = wtime_peff
peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]
return ParallelEfficiency(self._filenames, min_idx, peff)
def summarize(self, **kwargs):
"""
Return pandas DataFrame with the most important results stored in the timers.
"""
import pandas as pd
colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
frame = pd.DataFrame(columns=colnames)
for i, timer in enumerate(self.timers()):
frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
# Compute parallel efficiency (use the run with min number of cpus to normalize).
i = frame["tot_ncpus"].values.argmin()
ref_wtime = frame.ix[i]["wall_time"]
ref_ncpus = frame.ix[i]["tot_ncpus"]
frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
return frame
@add_fig_kwargs
def plot_efficiency(self, key="wall_time", what="good+bad", nmax=5, ax=None, **kwargs):
"""
Plot the parallel efficiency
Args:
key: Parallel efficiency is computed using the wall_time.
what: Specifies what to plot: `good` for sections with good parallel efficiency.
`bad` for sections with bad efficiency. Options can be concatenated with `+`.
nmax: Maximum number of entries in plot
ax: matplotlib :class:`Axes` or None if a new figure should be created.
================ ====================================================
kwargs Meaning
================ ====================================================
linewidth matplotlib linewidth. Default: 2.0
markersize matplotlib markersize. Default: 10
================ ====================================================
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
lw = kwargs.pop("linewidth", 2.0)
msize = kwargs.pop("markersize", 10)
what = what.split("+")
timers = self.timers()
peff = self.pefficiency()
n = len(timers)
xx = np.arange(n)
#ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])
ax.set_prop_cycle(color=['g', 'b', 'c', 'm', 'y', 'k'])
lines, legend_entries = [], []
# Plot sections with good efficiency.
if "good" in what:
good = peff.good_sections(key=key, nmax=nmax)
for g in good:
#print(g, peff[g])
yy = peff[g][key]
line, = ax.plot(xx, yy, "-->", linewidth=lw, markersize=msize)
lines.append(line)
legend_entries.append(g)
# Plot sections with bad efficiency.
if "bad" in what:
bad = peff.bad_sections(key=key, nmax=nmax)
for b in bad:
#print(b, peff[b])
yy = peff[b][key]
line, = ax.plot(xx, yy, "-.<", linewidth=lw, markersize=msize)
lines.append(line)
legend_entries.append(b)
# Add total if not already done
if "total" not in legend_entries:
yy = peff["total"][key]
total_line, = ax.plot(xx, yy, "r", linewidth=lw, markersize=msize)
lines.append(total_line)
legend_entries.append("total")
ax.legend(lines, legend_entries, loc="best", shadow=True)
#ax.set_title(title)
ax.set_xlabel('Total_NCPUs')
ax.set_ylabel('Efficiency')
ax.grid(True)
# Set xticks and labels.
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(xx)
ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)
return fig
@add_fig_kwargs
def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
"""
Plot pie charts of the different timers.
Args:
key: Keyword used to extract data from timers.
            minfract: Don't show sections whose relative weight is less than minfract.
Returns:
`matplotlib` figure
"""
timers = self.timers()
n = len(timers)
# Make square figures and axes
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
fig = plt.gcf()
gspec = GridSpec(n, 1)
for idx, timer in enumerate(timers):
ax = plt.subplot(gspec[idx, 0])
ax.set_title(str(timer))
timer.pie(ax=ax, key=key, minfract=minfract, show=False)
return fig
@add_fig_kwargs
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
"""
Plot stacked histogram of the different timers.
Args:
key: Keyword used to extract data from the timers. Only the first `nmax`
                sections with largest value are shown.
            nmax: Maximum number of sections to show. Other entries are grouped together
in the `others` section.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax=%d)" % nmax)
values.append(rest)
# The dataset is stored in values. Now create the stacked histogram.
ind = np.arange(n) # the locations for the groups
width = 0.35 # the width of the bars
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = ax.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
ax.set_title("Stacked histogram with the %d most important sections" % nmax)
ticks = ind + width / 2.0
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation=15)
# Add legend.
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig
def plot_all(self, show=True, **kwargs):
"""
Call all plot methods provided by the parser.
"""
figs = []; app = figs.append
app(self.plot_stacked_hist(show=show))
app(self.plot_efficiency(show=show))
app(self.plot_pie(show=show))
return figs
class ParallelEfficiency(dict):
def __init__(self, filenames, ref_idx, *args, **kwargs):
self.update(*args, **kwargs)
self.filenames = filenames
self._ref_idx = ref_idx
def _order_by_peff(self, key, criterion, reverse=True):
self.estimator = {
"min": min,
"max": max,
"mean": lambda items: sum(items) / len(items),
}[criterion]
data = []
for (sect_name, peff) in self.items():
# Ignore values where we had a division by zero.
if all([v != -1 for v in peff[key]]):
values = peff[key][:]
#print(sect_name, values)
if len(values) > 1:
ref_value = values.pop(self._ref_idx)
assert ref_value == 1.0
data.append((sect_name, self.estimator(values)))
fsort = lambda t: t[1]
data.sort(key=fsort, reverse=reverse)
return tuple([sect_name for (sect_name, e) in data])
def totable(self, stop=None, reverse=True):
osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
n = len(self.filenames)
table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
for sect_name in osects:
peff = self[sect_name]["wall_time"]
fract = self[sect_name]["wall_fract"]
vals = alternate(peff, fract)
table.append([sect_name] + ["%.2f" % val for val in vals])
return table
def good_sections(self, key="wall_time", criterion="mean", nmax=5):
good_sections = self._order_by_peff(key, criterion=criterion)
return good_sections[:nmax]
def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
return bad_sections[:nmax]
class AbinitTimerSection(object):
"""Record with the timing results associated to a section of code."""
STR_FIELDS = [
"name"
]
NUMERIC_FIELDS = [
"wall_time",
"wall_fract",
"cpu_time",
"cpu_fract",
"ncalls",
"gflops",
]
FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)
@classmethod
def fake(cls):
return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)
def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
self.name = name.strip()
self.cpu_time = float(cpu_time)
self.cpu_fract = float(cpu_fract)
self.wall_time = float(wall_time)
self.wall_fract = float(wall_fract)
self.ncalls = int(ncalls)
self.gflops = float(gflops)
def to_tuple(self):
return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])
def to_dict(self):
return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}
def to_csvline(self, with_header=False):
"""Return a string with data in CSV format"""
string = ""
if with_header:
string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"
string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
return string
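# Example (a sketch of the emitted format): AbinitTimerSection.fake().to_csvline(with_header=True)
# produces
#   # name wall_time wall_fract cpu_time cpu_fract ncalls gflops
#   fake, 0.0, 0.0, 0.0, 0.0, -1, 0.0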
def __str__(self):
string = ""
for a in AbinitTimerSection.FIELDS: string += a + " = " + str(self.__dict__[a]) + ","
return string[:-1]
class AbinitTimer(object):
"""Container class storing the timing results."""
def __init__(self, sections, info, cpu_time, wall_time):
# Store sections and names
self.sections = tuple(sections)
self.section_names = tuple([s.name for s in self.sections])
self.info = info
self.cpu_time = float(cpu_time)
self.wall_time = float(wall_time)
self.mpi_nprocs = int(info["mpi_nprocs"])
self.omp_nthreads = int(info["omp_nthreads"])
self.mpi_rank = info["mpi_rank"].strip()
self.fname = info["fname"].strip()
def __str__(self):
string = "file=%s, wall_time=%.1f, mpi_nprocs=%d, omp_nthreads=%d" % (
self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads )
#string += ", rank = " + self.mpi_rank
return string
def __cmp__(self, other):
return cmp(self.wall_time, other.wall_time)
@property
def ncpus(self):
"""Total number of CPUs employed."""
return self.mpi_nprocs * self.omp_nthreads
def get_section(self, section_name):
try:
idx = self.section_names.index(section_name)
except:
raise
sect = self.sections[idx]
assert sect.name == section_name
return sect
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w")
for idx, section in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
def to_table(self, sort_key="wall_time", stop=None):
"""Return a table (list of lists) with timer data"""
table = [list(AbinitTimerSection.FIELDS), ]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table
# Maintain old API
totable = to_table
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
Return a pandas DataFrame with entries sorted according to `sort_key`.
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
def get_values(self, keys):
"""
Return a list of values associated to a particular list of keys.
"""
if is_string(keys):
return [s.__dict__[keys] for s in self.sections]
else:
values = []
for k in keys:
values.append([s.__dict__[k] for s in self.sections])
return values
def names_and_values(self, key, minval=None, minfract=None, sorted=True):
"""
Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
Return the names of the sections and the corresponding values.
"""
values = self.get_values(key)
names = self.get_values("name")
new_names, new_values = [], []
other_val = 0.0
if minval is not None:
assert minfract is None
for n, v in zip(names, values):
if v >= minval:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minval " + str(minval))
new_values.append(other_val)
elif minfract is not None:
assert minval is None
total = self.sum_sections(key)
for n, v in zip(names, values):
if v / total >= minfract:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minfract " + str(minfract))
new_values.append(other_val)
else:
# all values
new_names, new_values = names, values
if sorted:
# Sort new_values and rearrange new_names.
fsort = lambda t: t[1]
nandv = [nv for nv in zip(new_names, new_values)]
nandv.sort(key=fsort)
new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
return new_names, new_values
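# Example (sketch): with key="wall_time" and minfract=0.1, every section consuming at
# least 10% of the summed wall time keeps its own entry, the rest is lumped into a
# single "below minfract 0.1" entry, and both lists come back in increasing order of
# value when sorted=True.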
def _reduce_sections(self, keys, operator):
return operator(self.get_values(keys))
def sum_sections(self, keys):
return self._reduce_sections(keys, sum)
def order_sections(self, key, reverse=True):
"""Sort sections according to the value of key."""
fsort = lambda s: s.__dict__[key]
return sorted(self.sections, key=fsort, reverse=reverse)
@add_fig_kwargs
def cpuwall_histogram(self, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
nk = len(self.sections)
ind = np.arange(nk) # the x locations for the groups
width = 0.35 # the width of the bars
cpu_times = self.get_values("cpu_time")
rects1 = plt.bar(ind, cpu_times, width, color='r')
wall_times = self.get_values("wall_time")
rects2 = plt.bar(ind + width, wall_times, width, color='y')
# Add ylabel and title
ax.set_ylabel('Time (s)')
#if title:
# plt.title(title)
#else:
# plt.title('CPU-time and Wall-time for the different sections of the code')
ticks = self.get_values("name")
ax.set_xticks(ind + width)
ax.set_xticklabels(ticks)
ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")
return fig
#def hist2(self, key1="wall_time", key2="cpu_time"):
# labels = self.get_values("name")
# vals1, vals2 = self.get_values([key1, key2])
# N = len(vals1)
# assert N == len(vals2)
# plt.figure(1)
# plt.subplot(2, 1, 1) # 2 rows, 1 column, figure 1
# n1, bins1, patches1 = plt.hist(vals1, N, facecolor="m")
# plt.xlabel(labels)
# plt.ylabel(key1)
# plt.subplot(2, 1, 2)
# n2, bins2, patches2 = plt.hist(vals2, N, facecolor="y")
# plt.xlabel(labels)
# plt.ylabel(key2)
# plt.show()
@add_fig_kwargs
def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
"""
Plot pie chart for this timer.
Args:
key: Keyword used to extract data from the timer.
minfract: Don't show sections whose relative weight is less than minfract.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Set aspect ratio to be equal so that pie is drawn as a circle.
ax.axis("equal")
# Don't show sections whose value is less than minfract
labels, vals = self.names_and_values(key, minfract=minfract)
ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
return fig
@add_fig_kwargs
def scatter_hist(self, ax=None, **kwargs):
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax, fig, plt = get_ax_fig_plt(ax=ax)
x = np.asarray(self.get_values("cpu_time"))
y = np.asarray(self.get_values("wall_time"))
# the scatter plot:
axScatter = plt.subplot(1, 1, 1)
axScatter.scatter(x, y)
axScatter.set_aspect("auto")
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax / binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these axis.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
#plt.draw()
return fig
| mit |
jmargeta/scikit-learn | sklearn/utils/fixes.py | 4 | 6523 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
import collections
from operator import itemgetter
import inspect
from sklearn.externals import six
import numpy as np
try:
Counter = collections.Counter
except AttributeError:
class Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
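# Example (sketch) - the backport covers the subset of collections.Counter used here:
#   >>> Counter("aabbbcccc").most_common()
#   [('c', 4), ('b', 3), ('a', 2)]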
def lsqr(X, y, tol=1e-3):
import scipy.sparse.linalg as sp_linalg
from ..utils.extmath import safe_sparse_dot
if hasattr(sp_linalg, 'lsqr'):
# scipy 0.8 or greater
return sp_linalg.lsqr(X, y)
else:
n_samples, n_features = X.shape
if n_samples > n_features:
coef, _ = sp_linalg.cg(safe_sparse_dot(X.T, X),
safe_sparse_dot(X.T, y),
tol=tol)
else:
coef, _ = sp_linalg.cg(safe_sparse_dot(X, X.T), y, tol=tol)
coef = safe_sparse_dot(X.T, coef)
residues = y - safe_sparse_dot(X, coef)
return coef, None, None, residues
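# Example (sketch): on either branch, `coef = lsqr(X, y)[0]` yields the least-squares
# solution of X * coef ~= y; only the extra return values differ between
# scipy.sparse.linalg.lsqr and the conjugate-gradient fallback above.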
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
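# Example (sketch):
#   >>> _unique(np.array([3, 1, 3, 2]), return_inverse=True)
#   (array([1, 2, 3]), array([2, 0, 2, 1]))
# i.e. the unique values plus, for each original element, its index into the unique array.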
np_version = []
for x in np.__version__.split('.'):
try:
np_version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
np_version.append(x)
np_version = tuple(np_version)
if np_version[:2] < (1, 5):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
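# Example (sketch):
#   >>> _bincount(np.array([0, 1, 1]), minlength=4)
#   array([1, 2, 0, 0])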
if np_version[:2] < (1, 6):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
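# Example (sketch):
#   >>> _copysign(np.array([1.0, 2.0]), np.array([-1.0, 1.0]))
#   array([-1.,  2.])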
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
equal_adj = (sar[1:] == sar[:-1])
flag = np.concatenate((equal_adj, [False]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
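# Example (sketch):
#   >>> _in1d(np.array([0, 1, 2, 5]), np.array([0, 2]))
#   array([ True, False,  True, False])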
if not hasattr(np, 'in1d'):
in1d = _in1d
else:
in1d = np.in1d
def qr_economic(A, **kwargs):
"""Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
import scipy.linalg
# trick: triangular solve was introduced in scipy 0.9
if hasattr(scipy.linalg, 'solve_triangular'):
return scipy.linalg.qr(A, mode='economic', **kwargs)
else:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return scipy.linalg.qr(A, econ=True, **kwargs)
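# Example (sketch): for A of shape (m, n) with m >= n, Q, R = qr_economic(A) returns
# Q of shape (m, n) and R of shape (n, n) with np.allclose(A, np.dot(Q, R)),
# whichever SciPy branch is taken.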
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
try:
from numpy import count_nonzero
except ImportError:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
| bsd-3-clause |
alexandrejaguar/programandociencia | 20150828-graf3dpython/ex_trisurf3d.py | 1 | 3403 | """
EX_TRISURF3D.PY
Material de apoio para o post "Gráficos tridimensionais no Python
[PARTE I]", no Programando Ciência.
Support material for the blog post "Three-dimensional plots on Python
[PART I]", on Programando Ciência.
* Autor/Author: Alexandre 'Jaguar' Fioravante de Siqueira
* Contato/Contact: http://www.programandociencia.com/sobre/
* Material de apoio/Support material:
http://www.github.com/alexandrejaguar/programandociencia
* Para citar esse material, por favor utilize a referência abaixo:
DE SIQUEIRA, Alexandre Fioravante. Gráficos tridimensionais no Python
[PARTE I]. Campinas: Programando Ciência, 28 de agosto de 2015.
Disponível em:
http://programandociencia.com/2015/08/28/
graficos-tridimensionais-no-python-parte-i-three-dimensional-plots-on-python-part-i/.
Acesso em: <DATA DE ACESSO>.
* In order to cite this material, please use the reference below
(this is a Chicago-like style):
de Siqueira, Alexandre Fioravante. “Three-dimensional plots on Python
[PART I]”. Programando Ciência. 2015, August 28. Available at
http://programandociencia.com/2015/08/28/
graficos-tridimensionais-no-python-parte-i-three-dimensional-plots-on-python-part-i/.
Access date: <ACCESS DATE>.
Copyright (C) Alexandre Fioravante de Siqueira
Este programa é um software livre; você pode redistribuí-lo e/ou
modificá-lo dentro dos termos da Licença Pública Geral GNU como publicada
pela Fundação do Software Livre (FSF); na versão 3 da Licença, ou qualquer
versão posterior.
Este programa é distribuído na esperança de que possa ser útil, mas SEM
NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a qualquer
MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a Licença Pública Geral GNU para
maiores detalhes.
Você deve ter recebido uma cópia da Licença Pública Geral GNU junto com
este programa. Se não, veja <http://www.gnu.org/licenses/>.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from mpl_toolkits.mplot3d import Axes3D  # with help from Welton Vaz: https://github.com/weltonvaz
import matplotlib.pyplot as plt
import numpy as np
n_angles = 72
n_radii = 4
# An array of radii
# Does not include radius r=0, this is to eliminate duplicate points
radii = np.linspace(0.125, 1.0, n_radii)
# An array of angles
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=True)
# Repeat all angles for each radius
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
# Convert polar (radii, angles) coords to cartesian (x, y) coords
# (0, 0) is added here. There are no duplicate points in the (x, y) plane
x = np.append(0, (radii*np.cos(angles)).flatten())
y = np.append(0, (radii*np.sin(angles)).flatten())
# Surface
z = np.sin(-x*(y**2))+np.cos((x**2)*-y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(x, y, z, cmap='Oranges', linewidth=0.1)
plt.show()
| gpl-2.0 |
TomAugspurger/pandas | pandas/tests/indexes/period/test_shift.py | 4 | 4398 | import numpy as np
import pytest
from pandas import PeriodIndex, period_range
import pandas._testing as tm
class TestPeriodIndexShift:
# ---------------------------------------------------------------
# PeriodIndex.shift is used by __add__ and __sub__
def test_pi_shift_ndarray(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(
["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(
["2011-02", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="A", start="1/1/2002", end="12/1/2010")
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="A", start="1/1/2000", end="12/1/2008")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="M", start="2/1/2001", end="1/1/2010")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="M", start="12/1/2000", end="11/1/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="D", start="1/2/2001", end="12/2/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009")
pi2 = period_range(freq="D", start="12/31/2000", end="11/30/2009")
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_corner_cases(self):
# GH#9903
idx = PeriodIndex([], name="xxx", freq="H")
msg = "`freq` argument is not supported for PeriodArray._time_shift"
with pytest.raises(TypeError, match=msg):
# period shift doesn't accept freq
idx.shift(1, freq="H")
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = PeriodIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(0), idx)
exp = PeriodIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(3), exp)
exp = PeriodIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(-3), exp)
def test_shift_nat(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
result = idx.shift(1)
expected = PeriodIndex(
["2011-02", "2011-03", "NaT", "2011-05"], freq="M", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH#8083
drange = period_range("20130101", periods=5, freq="D")
result = drange.shift(1)
expected = PeriodIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_periods(self):
# GH #22458 : argument 'n' was deprecated in favor of 'periods'
idx = period_range(freq="A", start="1/1/2001", end="12/1/2009")
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
| bsd-3-clause |
rosshamish/deepdreamers | caffe-master/examples/finetune_flickr_style/assemble_data.py | 38 | 3636 | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
| gpl-2.0 |
Stargrazer82301/ChrisFit | Old/Wrappers/ChrisFit_HAPLESS_Pieter_Wrapper.py | 1 | 10523 | # Identify location
import socket
location = socket.gethostname()
if location == 'Monolith':
dropbox = 'E:\\Users\\Chris\\Dropbox\\'
if location == 'grima':
dropbox = '/home/chris/Dropbox/'
if location == 'saruman':
dropbox = '/home/herdata/spx7cjc/Dropbox/'
# Import smorgasbord
import os
import pdb
import sys
import gc
sys.path.append( os.path.join(dropbox,'Work','Scripts') )
sys.path.append( os.path.join(dropbox,'Work','Scripts','ChrisFit'))
import time
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
#import pygame.mixer
import shutil
import ChrisFuncs
import ChrisFit
# Prepare output file
time_stamp = time.time()
output_header = '# ID T_COLD T_COLD_ERR M_COLD M_COLD_ERR T_WARM T_WARM_ERR M_WARM M_WARM_ERR M_DUST M_DUST_ERR BETA BETA_ERR CHISQ_DUST \n'
filepath = os.path.join(dropbox,'Work','Scripts','ChrisFit','Wrappers','Output','HAPLESS_Pieter_Greybody_Output_'+str(time_stamp).replace(',','-')+'.dat')
datfile = open(filepath, 'a')
datfile.write(output_header)
datfile.close()
# Read input catalogue
data = np.genfromtxt( os.path.join(dropbox+'Work','Tables','H-ATLAS','HAPLESS Ancillary','HAPLESS_Pieter_DR1_Old-Beam.csv'), delimiter=',', names=True)
#data = np.genfromtxt( os.path.join(dropbox,'Work','Tables','HAPLESS.csv'), delimiter=',', names=True)
names = data['HAPLESS_ID']
n_sources = data['HAPLESS_ID'].shape[0]
distances = np.genfromtxt( os.path.join(dropbox+'Work','Tables','HAPLESS.csv'), delimiter=',', names=True)['DISTANCE']
# Declare wavebands and their instruments
in_wavelengths = np.array([22E-6, 60E-6, 100E-6, 160E-6, 250E-6, 350E-6, 500E-6])
in_instruments = ['WISE', 'IRAS', 'PACS', 'PACS', 'SPIRE', 'SPIRE', 'SPIRE']
in_limits = [True, 'blah', False, False, False, False, False]
#in_wavelengths = np.array([100E-6, 160E-6, 250E-6, 350E-6, 500E-6])
#in_instruments = ['PACS', 'PACS', 'SPIRE', 'SPIRE', 'SPIRE']
#in_limits = [False, False, False, False, False]
# Construct nice big arrays of fluxes & errors
in_fluxes = np.zeros([n_sources, in_wavelengths.shape[0]])
in_errors = np.zeros([n_sources, in_wavelengths.shape[0]])
in_fluxes[:,0], in_fluxes[:,1], in_fluxes[:,2], in_fluxes[:,3], in_fluxes[:,4], in_fluxes[:,5], in_fluxes[:,6] = data['w4'], data['60'], data['100'], data['160'], data['250'], data['350'], data['500']
in_errors[:,0], in_errors[:,1], in_errors[:,2], in_errors[:,3], in_errors[:,4], in_errors[:,5], in_errors[:,6] = data['w4err'], data['60err'], data['100err'], data['160err'], data['250err'], data['350err'], data['500err']
#in_fluxes[:,0], in_fluxes[:,1], in_fluxes[:,2], in_fluxes[:,3], in_fluxes[:,4], in_fluxes[:,5], in_fluxes[:,6] = ChrisFuncs.ABMagsToJy(data['W4_CAAPR']), data['F60_SCANPI'], data['F100_CAAPR'], data['F160_CAAPR'], data['F250_CAAPR'], data['F350_CAAPR'], data['F500_CAAPR']
#in_errors[:,0], in_errors[:,1], in_errors[:,2], in_errors[:,3], in_errors[:,4], in_errors[:,5], in_errors[:,6] = ChrisFuncs.ErrABMagsToJy(data['W4_CAAPR_ERR'], data['W4_CAAPR']), data['E60_SCANPI'], data['E100_CAAPR'], data['E160_CAAPR'], data['E250_CAAPR'], data['E350_CAAPR'], data['E500_CAAPR']
# Prepare array to hold output values
final = np.zeros([n_sources, 14])
col_corrs = np.zeros([n_sources, len(in_wavelengths)])
# Decide whether to generate plots
plotting = True
# Decide whether to bootstrap
bootstrapping = 100
# Decide whether to attempt both types of fit, or just a single-component fit
both_fits = True
residuals_test1_list = np.zeros(n_sources)
residuals_test2_list = np.zeros(n_sources)
# Ready timer variables
source_time_array = []
time_total = 0.0
# Loop over galaxies
for h in range(0, n_sources):
H = int(data['HAPLESS_ID'][h])
final[h,0] = H
time_start = time.time()
if h==24:
continue
# Read in source details
distance = distances[h] * 1.0E6
source_name = 'HAPLESS '+str(H)
func_redshift = 0.0
# Check if data present for each waveband, and construct appropriate input arrays for function
func_wavelengths = []
func_instruments = []
func_fluxes = []
func_errors = []
func_limits = []
for w in range(0, in_wavelengths.shape[0]):
# if in_instruments[w]!='IRAS':
if in_fluxes[h,w] > -90.0:
if np.isnan(in_fluxes[h,w]) == False:
if in_errors[h,w] > -90.0:
if np.isnan(in_errors[h,w]) == False:
func_wavelengths.append(float(in_wavelengths[w]))
func_instruments.append(in_instruments[w])
func_limits.append(in_limits[w])
func_fluxes.append(float(in_fluxes[h,w]))
func_errors.append(float(in_errors[h,w]))
if in_instruments[w]=='IRAS':
if float(in_fluxes[h,w])==0.0:
func_errors[len(func_errors)-1] *= 1.0
# Deal with bands which are or are not limits depending upon fit type
func_limits_1GB, func_limits_2GB = func_limits[:], func_limits[:]
for iras_index in np.where(np.array(func_instruments) == 'IRAS')[0]:
    func_limits_1GB[iras_index] = True
    if func_limits_2GB[iras_index] != True:
        func_limits_2GB[iras_index] = False
# Run data through ChrisFit functions for 1 and then 2 component greybodies, and move generated plots to appropriate directories
output_1GB = ChrisFit.ChrisFit(source_name, func_wavelengths, func_fluxes, func_errors, func_instruments, 1, distance, limits=func_limits_1GB, beta=2.0, kappa_0=0.077, lambda_0=850E-6, redshift=func_redshift, col_corr=True, plotting=plotting, bootstrapping=bootstrapping, percentile=66.6, min_temp=10.0)
if both_fits==True:
output_2GB = ChrisFit.ChrisFit(source_name, func_wavelengths, func_fluxes, func_errors, func_instruments, 2, distance, limits=func_limits_2GB, beta=2.0, kappa_0=0.077, lambda_0=850E-6, redshift=func_redshift, col_corr=True, plotting=plotting, bootstrapping=bootstrapping, percentile=66.6, min_temp=10.0)
# Record colour corrections
if both_fits==True:
func_indices = np.in1d(in_wavelengths, func_wavelengths)
col_corrs[h,func_indices] = output_2GB[3] / np.array(func_fluxes)
# Calculate for each fit the probability that the null hypothesis is not satisfied
prob_1GB = 0
prob_2GB = 1E50
# Deal with output if only attempting the one-component fit
if both_fits==False:
final[h,1] = output_1GB[1][0]
final[h,2] = output_1GB[2][0]
final[h,3] = output_1GB[1][1]
final[h,4] = output_1GB[2][1]
final[h,5] = output_1GB[1][2]
final[h,6] = output_1GB[2][2]
final[h,7] = output_1GB[1][3]
final[h,8] = output_1GB[2][3]
final[h,9] = output_1GB[1][4]
final[h,10] = output_1GB[2][4]
final[h,11] = output_1GB[1][5]
final[h,12] = output_1GB[2][5]
final[h,13] = np.sum(output_1GB[0])
# Check if 1-greybody fit is superior, and process accordingly
if both_fits==True:
if prob_1GB>=prob_2GB:
final[h,1] = output_1GB[1][0]
final[h,2] = output_1GB[2][0]
final[h,3] = output_1GB[1][1]
final[h,4] = output_1GB[2][1]
final[h,5] = output_1GB[1][2]
final[h,6] = output_1GB[2][2]
final[h,7] = output_1GB[1][3]
final[h,8] = output_1GB[2][3]
final[h,9] = output_1GB[1][4]
final[h,10] = output_1GB[2][4]
final[h,11] = output_1GB[1][5]
final[h,12] = output_1GB[2][5]
final[h,13] = np.sum(output_1GB[0])
# Else check if 2-greybody fit is superior, and process accordingly
if prob_2GB>prob_1GB:
final[h,1] = output_2GB[1][0] # T_c
final[h,2] = output_2GB[2][0] # T_c_sigma
final[h,3] = output_2GB[1][1] # M_c
final[h,4] = output_2GB[2][1] # M_c_sigma
final[h,5] = output_2GB[1][2] # T_w
final[h,6] = output_2GB[2][2] # T_w_sigma
final[h,7] = output_2GB[1][3] # M_w
final[h,8] = output_2GB[2][3] # M_w_sigma
final[h,9] = output_2GB[1][4] # M_d
final[h,10] = output_2GB[2][4] # M_d_sigma
final[h,11] = output_2GB[1][5] # beta
final[h,12] = output_2GB[2][5] # beta_sigma
final[h,13] = np.sum(output_2GB[0]) # chisq
# Record output for source to file
datstring = str((final[h,:]).tolist())
datstring = datstring.replace('[', '')
datstring = datstring.replace(']', '')
datstring = datstring.replace(',', ' ')
datstring = datstring+' \n'
datfile = open(filepath, 'a')
datfile.write(datstring)
datfile.close()
plt.close('all')
gc.collect()
# Record time spent
time_source = (time.time() - time_start)
time_total += time_source
time_source_mins = time_source/60.0
source_time_array.append(time_source_mins)
time_source_mean = np.mean(np.array(source_time_array))
try:
lambda_test1 = 350E-6
index_test1 = np.where(np.array(func_wavelengths)==lambda_test1)[0][0]
residuals_test1_list[h] = output_2GB[4][index_test1] / np.array(func_errors)[index_test1]
index_test1 = np.where(np.array(in_wavelengths)==lambda_test1)[0][0]
except:
pass
try:
lambda_test2 = 500E-6
index_test2 = np.where(np.array(func_wavelengths)==lambda_test2)[0][0]
residuals_test2_list[h] = output_2GB[4][index_test2] / np.array(func_errors)[index_test2]
index_test2 = np.where(np.array(in_wavelengths)==lambda_test2)[0][0]
except:
pass
print np.where( (in_fluxes[:,index_test1]>in_errors[:,index_test1]) & (np.isnan(residuals_test1_list)==False) )[0].shape[0]
print np.where( (in_fluxes[:,index_test2]>in_errors[:,index_test2]) & (np.isnan(residuals_test2_list)==False) )[0].shape[0]
print np.mean( residuals_test1_list[ np.where( (in_fluxes[:,index_test1]>in_errors[:,index_test1]) & (np.isnan(residuals_test1_list)==False) ) ] )
print np.mean( residuals_test2_list[ np.where( (in_fluxes[:,index_test2]>in_errors[:,index_test2]) & (np.isnan(residuals_test2_list)==False) ) ] )
print np.median(final[:,1][ np.where( (in_fluxes[:,index_test2]>in_errors[:,index_test2]) & (np.isnan(residuals_test2_list)==False) ) ] )
print np.median(final[:,11][ np.where( (in_fluxes[:,index_test2]>in_errors[:,index_test2]) & (np.isnan(residuals_test2_list)==False) ) ] )
# Jubilate
print 'All done!'
| mit |
geopandas/geopandas | doc/source/conf.py | 1 | 13315 | # -*- coding: utf-8 -*-
#
# GeoPandas documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 15 08:08:14 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import warnings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx_gallery.load_style",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"myst_parser",
"nbsphinx",
"numpydoc",
"sphinx_toggleprompt",
"matplotlib.sphinxext.plot_directive",
]
# continue doc build and only print warnings/errors in examples
ipython_warning_is_error = False
ipython_exec_lines = [
# ensure that dataframes are not truncated in the IPython code blocks
"import pandas as _pd",
'_pd.set_option("display.max_columns", 20)',
'_pd.set_option("display.width", 100)',
]
# Fix issue with warnings from numpydoc (see discussion in PR #534)
numpydoc_show_class_members = False
def setup(app):
app.add_css_file("custom.css") # may also be an URL
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
autosummary_generate = True
nbsphinx_execute = "always"
nbsphinx_allow_errors = True
# suppress matplotlib warning in examples
warnings.filterwarnings(
"ignore",
category=UserWarning,
message="Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure.",
)
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"GeoPandas"
copyright = u"2013–2021, GeoPandas developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import geopandas
version = release = geopandas.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"search_bar_position": "sidebar",
"github_url": "https://github.com/geopandas/geopandas",
"twitter_url": "https://twitter.com/geopandas",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo/geopandas_logo_web.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/logo/favicon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# Add redirect for previously existing pages, each item is like `(from_old, to_new)`
moved_pages = [
# user guide
("aggregation_with_dissolve", "docs/user_guide/aggregation_with_dissolve"),
("data_structures", "docs/user_guide/data_structures"),
("geocoding", "docs/user_guide/geocoding"),
("geometric_manipulations", "docs/user_guide/geometric_manipulations"),
("indexing", "docs/user_guide/indexing"),
("io", "docs/user_guide/io"),
("mapping", "docs/user_guide/mapping"),
("mergingdata", "docs/user_guide/mergingdata"),
("missing_empty", "docs/user_guide/missing_empty"),
("projections", "docs/user_guide/projections"),
("set_operations", "docs/user_guide/set_operations"),
# other
("install", "getting_started/install"),
("reference", "docs/reference"),
("changelog", "docs/changelog"),
("code_of_conduct", "community/code_of_conduct"),
("contributing", "community/contributing"),
]
html_additional_pages = {page[0]: "redirect.html" for page in moved_pages}
html_context = {"redirects": {old: new for old, new in moved_pages}}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "GeoPandasdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "GeoPandas.tex", u"GeoPandas Documentation", u"Kelsey Jordahl", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "geopandas", u"GeoPandas Documentation", [u"Kelsey Jordahl"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"GeoPandas",
u"GeoPandas Documentation",
u"Kelsey Jordahl",
"GeoPandas",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. note::
| This page was generated from `{{ docname }}`__.
| Interactive online version: :raw-html:`<a href="https://mybinder.org/v2/gh/geopandas/geopandas/master?urlpath=lab/tree/doc/source/{{ docname }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/geopandas/geopandas/blob/master/doc/source/{{ docname }}
"""
# --Options for sphinx extensions -----------------------------------------------
# connect docs in other projects
intersphinx_mapping = {
"pyproj": (
"https://pyproj4.github.io/pyproj/stable/",
"https://pyproj4.github.io/pyproj/stable/objects.inv",
),
"pandas": (
"https://pandas.pydata.org/pandas-docs/stable/",
"https://pandas.pydata.org/pandas-docs/stable/objects.inv",
),
"shapely": (
"https://shapely.readthedocs.io/en/stable/",
"https://shapely.readthedocs.io/en/stable/objects.inv",
),
"fiona": (
"https://fiona.readthedocs.io/en/stable/",
"https://fiona.readthedocs.io/en/stable/objects.inv",
),
"pygeos": (
"https://pygeos.readthedocs.io/en/latest/",
"https://pygeos.readthedocs.io/en/latest/objects.inv",
),
"rtree": (
"https://rtree.readthedocs.io/en/stable/",
"https://rtree.readthedocs.io/en/stable/objects.inv",
),
"mapclassify": (
"https://pysal.org/mapclassify/",
"https://pysal.org/mapclassify/objects.inv",
),
"libpysal": (
"https://pysal.org/libpysal/",
"https://pysal.org/libpysal/objects.inv",
),
"matplotlib": (
"https://matplotlib.org/stable/",
"https://matplotlib.org/stable/objects.inv",
),
"geopy": (
"https://geopy.readthedocs.io/en/stable/",
"https://geopy.readthedocs.io/en/stable/objects.inv",
),
"cartopy": (
"https://scitools.org.uk/cartopy/docs/latest/",
"https://scitools.org.uk/cartopy/docs/latest/objects.inv",
),
"pyepsg": (
"https://pyepsg.readthedocs.io/en/stable/",
"https://pyepsg.readthedocs.io/en/stable/objects.inv",
),
"contextily": (
"https://contextily.readthedocs.io/en/stable/",
"https://contextily.readthedocs.io/en/stable/objects.inv",
),
"rasterio": (
"https://rasterio.readthedocs.io/en/stable/",
"https://rasterio.readthedocs.io/en/stable/objects.inv",
),
"geoplot": (
"https://residentmario.github.io/geoplot/index.html",
"https://residentmario.github.io/geoplot/objects.inv",
),
"folium": (
"https://python-visualization.github.io/folium/",
"https://python-visualization.github.io/folium/objects.inv",
),
"python": ("https://docs.python.org/3", "https://docs.python.org/3/objects.inv"),
}
| bsd-3-clause |
louispotok/pandas | pandas/tests/indexes/interval/test_interval_range.py | 1 | 13089 | from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Timestamp, Timedelta, DateOffset,
interval_range, date_range, timedelta_range)
from pandas.core.dtypes.common import is_integer
from pandas.tseries.offsets import Day
import pandas.util.testing as tm
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalRange(object):
@pytest.mark.parametrize('freq, periods', [
(1, 100), (2.5, 40), (5, 20), (25, 4)])
def test_constructor_numeric(self, closed, name, freq, periods):
start, end = 0, 100
breaks = np.arange(101, step=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
result = interval_range(
start=start, end=end, periods=periods, name=name, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
@pytest.mark.parametrize('freq, periods', [
('D', 364), ('2D', 182), ('22D18H', 16), ('M', 11)])
def test_constructor_timestamp(self, closed, name, freq, periods, tz):
start, end = Timestamp('20180101', tz=tz), Timestamp('20181231', tz=tz)
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
if not breaks.freq.isAnchored() and tz is None:
# matches expected only for non-anchored offsets and tz naive
# (anchored/DST transitions cause unequal spacing in expected)
result = interval_range(start=start, end=end, periods=periods,
name=name, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq, periods', [
('D', 100), ('2D12H', 40), ('5D', 20), ('25D', 4)])
def test_constructor_timedelta(self, closed, name, freq, periods):
start, end = Timedelta('0 days'), Timedelta('100 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
result = interval_range(
start=start, end=end, periods=periods, name=name, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('start, end, freq, expected_endpoint', [
(0, 10, 3, 9),
(0, 10, 1.5, 9),
(0.5, 10, 3, 9.5),
(Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')),
(Timestamp('2018-01-01'),
Timestamp('2018-02-09'),
'MS',
Timestamp('2018-02-01')),
(Timestamp('2018-01-01', tz='US/Eastern'),
Timestamp('2018-01-20', tz='US/Eastern'),
'5D12H',
Timestamp('2018-01-17 12:00:00', tz='US/Eastern'))])
def test_early_truncation(self, start, end, freq, expected_endpoint):
# index truncates early if freq causes end to be skipped
result = interval_range(start=start, end=end, freq=freq)
result_endpoint = result.right[-1]
assert result_endpoint == expected_endpoint
@pytest.mark.parametrize('start, end, freq', [
(0.5, None, None),
(None, 4.5, None),
(0.5, None, 1.5),
(None, 6.5, 1.5)])
def test_no_invalid_float_truncation(self, start, end, freq):
# GH 21161
if freq is None:
breaks = [0.5, 1.5, 2.5, 3.5, 4.5]
else:
breaks = [0.5, 2.0, 3.5, 5.0, 6.5]
expected = IntervalIndex.from_breaks(breaks)
result = interval_range(start=start, end=end, periods=4, freq=freq)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('start, mid, end', [
(Timestamp('2018-03-10', tz='US/Eastern'),
Timestamp('2018-03-10 23:30:00', tz='US/Eastern'),
Timestamp('2018-03-12', tz='US/Eastern')),
(Timestamp('2018-11-03', tz='US/Eastern'),
Timestamp('2018-11-04 00:30:00', tz='US/Eastern'),
Timestamp('2018-11-05', tz='US/Eastern'))])
def test_linspace_dst_transition(self, start, mid, end):
# GH 20976: linspace behavior defined from start/end/periods
# accounts for the hour gained/lost during DST transition
result = interval_range(start=start, end=end, periods=2)
expected = IntervalIndex.from_breaks([start, mid, end])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq', [2, 2.0])
@pytest.mark.parametrize('end', [10, 10.0])
@pytest.mark.parametrize('start', [0, 0.0])
def test_float_subtype(self, start, end, freq):
# Has float subtype if any of start/end/freq are float, even if all
# resulting endpoints can safely be upcast to integers
# defined from start/end/freq
index = interval_range(start=start, end=end, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + end + freq) else 'float64'
assert result == expected
# defined from start/periods/freq
index = interval_range(start=start, periods=5, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + freq) else 'float64'
assert result == expected
# defined from end/periods/freq
index = interval_range(end=end, periods=5, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(end + freq) else 'float64'
assert result == expected
# GH 20976: linspace behavior defined from start/end/periods
index = interval_range(start=start, end=end, periods=5)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + end) else 'float64'
assert result == expected
def test_constructor_coverage(self):
# float value for periods
expected = interval_range(start=0, periods=10)
result = interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = interval_range(start=start, end=end)
result = interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = interval_range(start=start, end=end)
result = interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
for freq in equiv_freq:
result = interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the four parameters: start, end, periods, and freq, '
'exactly three must be specified')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0)
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=5)
with tm.assert_raises_regex(ValueError, msg):
interval_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
interval_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=5, periods=6, freq=1.5)
# mixed units
msg = 'start, end, freq need to be type compatible'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timestamp('20130101'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timedelta('1 day'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timedelta('1 day'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timestamp('20130110'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timestamp('20130110'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timedelta('10 days'), freq=2)
# invalid periods
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, periods='foo')
# invalid start
msg = 'start must be numeric or datetime-like, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start='foo', periods=10)
# invalid end
msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Interval(0, 1), periods=10)
# invalid freq for datetime-like
msg = 'freq must be numeric or convertible to DateOffset, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
# mixed tz
start = Timestamp('2017-01-01', tz='US/Eastern')
end = Timestamp('2017-01-07', tz='US/Pacific')
msg = 'Start and end cannot both be tz-aware with different timezones'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=start, end=end)
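# --- Illustrative sketch (not part of the original test class) ---
# A hedged, minimal example of the interval_range API exercised above:
# exactly three of start/end/periods/freq are supplied and the remaining
# piece is inferred.  The helper name below is invented for illustration.
def _sketch_interval_range_usage():
    idx = interval_range(start=0, end=10, freq=2)
    assert len(idx) == 5           # (0, 2], (2, 4], ..., (8, 10]
    assert idx.closed == 'right'   # intervals are right-closed by default
    return idx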
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 1 | 9159 | import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
ElasticNetCV)
def test_sparse_coef():
""" Check that the sparse_coef propery works """
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.todense().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
with warnings.catch_warnings(record=True):
# catch warning about alpha=0.
# this is discouraged but should work.
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
# check that warm restart leads to the same result with
# sparse and dense versions
rng = np.random.RandomState(seed=0)
coef_init = rng.randn(n_features)
d_clf.fit(X_train.todense(), y_train, coef_init=coef_init)
s_clf.fit(X_train, y_train, coef_init=coef_init)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in xrange(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
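# --- Illustrative sketch (not part of the original test module) ---
# A hedged, minimal example of the pattern exercised above: the same
# ElasticNet fit on a scipy.sparse matrix and on its dense counterpart
# should converge to (nearly) the same coefficients.  The helper name is
# invented for illustration.
def _sketch_sparse_dense_agreement():
    X, y = make_sparse_data(n_samples=50, n_features=20, n_informative=5)
    params = dict(alpha=0.1, l1_ratio=0.5, fit_intercept=False,
                  max_iter=1000, tol=1e-7)
    coef_sparse = ElasticNet(**params).fit(X, y).coef_
    coef_dense = ElasticNet(**params).fit(X.todense(), y).coef_
    assert_array_almost_equal(coef_sparse, coef_dense, 5)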
| bsd-3-clause |
hainm/scikit-xray | doc/sphinxext/tests/test_docscrape.py | 12 | 14257 | # -*- encoding:utf-8 -*-
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('shape=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal(
[n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
print(doc['index'])
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
a = [l for l in a.split('\n') if l.strip()]
b = [l for l in b.split('\n') if l.strip()]
for n, line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n, line, b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name, _, desc = doc5['Raises'][0]
assert_equal(name, 'LinAlgException')
assert_equal(desc, ['If array is singular.'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
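# --- Illustrative sketch (not part of the original test module) ---
# A hedged, minimal example of the NumpyDocString API exercised above: an
# invented numpydoc-style docstring is parsed and its 'Parameters' section
# inspected.
doc8 = NumpyDocString("""
    add(a, b)

    Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.
    """)


def test_sketch_parameters():
    assert_equal(len(doc8['Parameters']), 2)
    assert_equal([n for n, _, _ in doc8['Parameters']], ['a', 'b'])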
| bsd-3-clause |
lin-credible/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
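# --- Illustrative sketch (not part of the original fixture) ---
# A hedged example of what the mock above enables: once globs() and
# setup_module() have been run by the doctest machinery, fetching one of
# the registered datasets works offline and returns the synthetic arrays.
def _demo_mocked_fetch():
    return datasets.fetch_mldata('mnist-original', data_home=custom_data_home)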
| bsd-3-clause |
raincoatrun/basemap | examples/fcstmaps_axesgrid.py | 3 | 3120 | from __future__ import print_function
from __future__ import unicode_literals
# this example reads today's numerical weather forecasts
# from the NOAA OpenDAP servers and makes a multi-panel plot.
# This version demonstrates the use of the AxesGrid toolkit.
import numpy as np
import matplotlib.pyplot as plt
import sys
import numpy.ma as ma
import datetime
from mpl_toolkits.basemap import Basemap, addcyclic
from mpl_toolkits.axes_grid1 import AxesGrid
from netCDF4 import Dataset as NetCDFFile, num2date
# today's date is default.
if len(sys.argv) > 1:
YYYYMMDD = sys.argv[1]
else:
YYYYMMDD = datetime.datetime.today().strftime('%Y%m%d')
# set OpenDAP server URL.
try:
URLbase="http://nomads.ncep.noaa.gov:9090/dods/gfs/gfs"
URL=URLbase+YYYYMMDD+'/gfs_00z'
print(URL)
data = NetCDFFile(URL)
except:
msg = """
opendap server not providing the requested data.
Try another date by providing YYYYMMDD on command line."""
raise IOError(msg)
# read lats,lons,times.
print(data.variables.keys())
latitudes = data.variables['lat']
longitudes = data.variables['lon']
fcsttimes = data.variables['time']
times = fcsttimes[0:6] # first 6 forecast times.
ntimes = len(times)
# convert times to datetime instances.
fdates = num2date(times,units=fcsttimes.units,calendar='standard')
# make a list of YYYYMMDDHH strings.
verifdates = [fdate.strftime('%Y%m%d%H') for fdate in fdates]
# convert times to forecast hours.
fcsthrs = []
for fdate in fdates:
fdiff = fdate-fdates[0]
fcsthrs.append(fdiff.days*24. + fdiff.seconds/3600.)
print(fcsthrs)
print(verifdates)
lats = latitudes[:]
nlats = len(lats)
lons1 = longitudes[:]
nlons = len(lons1)
# unpack 2-meter temp forecast data.
t2mvar = data.variables['tmp2m']
# create figure, set up AxesGrid.
fig=plt.figure(figsize=(6,8))
grid = AxesGrid(fig, [0.05,0.01,0.9,0.9],
nrows_ncols=(3, 2),
axes_pad=0.25,
cbar_mode='single',
cbar_pad=0.3,
cbar_size=0.1,
cbar_location='top',
share_all=True,
)
# create Basemap instance for Orthographic projection.
m = Basemap(lon_0=-90,lat_0=60,projection='ortho')
# add wrap-around point in longitude.
t2m = np.zeros((ntimes,nlats,nlons+1),np.float32)
for nt in range(ntimes):
t2m[nt,:,:], lons = addcyclic(t2mvar[nt,:,:], lons1)
# convert to celsius.
t2m = t2m-273.15
# contour levels
clevs = np.arange(-30,30.1,2.)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
# make subplots.
for nt,fcsthr in enumerate(fcsthrs):
ax = grid[nt]
m.ax = ax
cs = m.contourf(x,y,t2m[nt,:,:],clevs,cmap=plt.cm.jet,extend='both')
m.drawcoastlines(linewidth=0.5)
m.drawcountries()
m.drawparallels(np.arange(-80,81,20))
m.drawmeridians(np.arange(0,360,20))
# panel title
ax.set_title('%d-h forecast valid '%fcsthr+verifdates[nt],fontsize=9)
# figure title
plt.figtext(0.5,0.95,
"2-m temp (\N{DEGREE SIGN}C) forecasts from %s"%verifdates[0],
horizontalalignment='center',fontsize=14)
# a single colorbar.
cbar = fig.colorbar(cs, cax=grid.cbar_axes[0], orientation='horizontal')
plt.show()
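# --- Illustrative addition (not part of the original example) ---
# To also write the multi-panel figure to disk, one could follow (or
# replace) plt.show() with something like:
#   fig.savefig('gfs_t2m_%s.png' % YYYYMMDD, dpi=150)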
| gpl-2.0 |
leggitta/mne-python | mne/viz/misc.py | 13 | 19748 | """Functions to make simple plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import copy
import warnings
from glob import glob
import os.path as op
from itertools import cycle
import numpy as np
from scipy import linalg
from ..surface import read_surface
from ..io.proj import make_projector
from ..utils import logger, verbose, get_subjects_dir
from ..io.pick import pick_types
from .utils import tight_layout, COLORS, _prepare_trellis
@verbose
def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data
Parameters
----------
cov : instance of Covariance
The covariance matrix.
    info : dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
        Plot also the singular values of the noise covariance for each sensor
        type. We show square roots, i.e. standard deviations.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
if exclude == 'bads':
exclude = info['bads']
ch_names = [n for n in cov.ch_names if n not in exclude]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
idx_eeg = [ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in ch_names]
idx_mag = [ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in ch_names]
idx_grad = [ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in ch_names]
idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
(idx_grad, 'Gradiometers', 'fT/cm', 1e13),
(idx_mag, 'Magnetometers', 'fT', 1e15)]
idx_names = [(idx, name, unit, scaling)
for idx, name, unit, scaling in idx_names if len(idx) > 0]
C = cov.data[ch_idx][:, ch_idx]
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
import matplotlib.pyplot as plt
fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
for k, (idx, name, _, _) in enumerate(idx_names):
plt.subplot(1, len(idx_names), k + 1)
plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
plt.title(name)
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd = plt.figure()
for k, (idx, name, unit, scaling) in enumerate(idx_names):
s = linalg.svd(C[idx][:, idx], compute_uv=False)
plt.subplot(1, len(idx_names), k + 1)
plt.ylabel('Noise std (%s)' % unit)
plt.xlabel('Eigenvalue index')
plt.semilogy(np.sqrt(s) * scaling)
plt.title(name)
tight_layout(fig=fig_svd)
if show:
plt.show()
return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
plt.title('Time-frequency source power')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
if show:
plt.show()
return fig
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
    # XXX : next line is a hack, don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
if show:
plt.show()
return fig
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
raise IOError('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surf_fnames.append(surf_fname)
if len(surf_fnames) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
slices=slices, show=show)
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True):
"""Plot events to get a visual display of the paradigm
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Typically the raw.first_samp
attribute. It is needed for recordings on a Neuromag
system as the events are defined relative to the system
start and not to the beginning of the recording.
color : dict | None
Dictionary of event_id value and its associated color. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors).
event_id : dict | None
Dictionary of event label (e.g. 'aud_l') and its associated
event_id value. Label used to plot a legend. If None, no legend is
drawn.
axes : instance of matplotlib.axes.AxesSubplot
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = dict((v, k) for k, v in event_id.items())
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
for this_event in unique_events_id:
if this_event not in unique_events:
raise ValueError('%s from event_id is not present in events.'
% this_event)
for this_event in unique_events:
if this_event not in unique_events_id:
warnings.warn('event %s missing from event_id will be ignored.'
% this_event)
else:
unique_events_id = unique_events
if color is None:
if len(unique_events) > len(COLORS):
warnings.warn('More events than colors available. '
'You should pass a list of unique colors.')
colors = cycle(COLORS)
color = dict()
for this_event, this_color in zip(unique_events_id, colors):
color[this_event] = this_color
else:
for this_event in color:
if this_event not in unique_events_id:
raise ValueError('%s from color is not present in events '
'or event_id.' % this_event)
for this_event in unique_events_id:
if this_event not in color:
warnings.warn('Color is not available for event %d. Default '
'colors will be used.' % this_event)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
kwargs = {}
if event_id is not None:
event_label = '{0} ({1})'.format(event_id_rev[ev],
np.sum(ev_mask))
kwargs['label'] = event_label
if ev in color:
kwargs['color'] = color[ev]
if equal_spacing:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
(idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
else:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
events[ev_mask, 2], '.', **kwargs)
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set_xlabel(xlabel)
ax.set_ylabel('Events id')
ax.grid('on')
fig = fig if fig is not None else plt.gcf()
if event_id is not None:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig.canvas.draw()
if show:
plt.show()
return fig
def _get_presser(fig):
"""Helper to get our press callback"""
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if val.func.__class__.__name__ == 'partial':
func = val.func
break
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles
Parameters
----------
dipoles : list of instance of Dipoles
The dipoles whose amplitudes should be shown.
colors: list of colors | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(COLORS)
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set_xlim(xlim)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Amplitude (nAm)')
if show:
fig.show()
return fig
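# --- Illustrative sketch (not part of the MNE source tree) ---
# A hedged, minimal example of plot_events() defined above: a synthetic
# events array (sample index, previous value, event id) is plotted with
# labelled event ids.  The event names are invented for illustration.
def _sketch_plot_events():
    events = np.array([[100, 0, 1], [300, 0, 2], [520, 0, 1]])
    return plot_events(events, sfreq=1000., first_samp=0,
                       event_id={'auditory': 1, 'visual': 2}, show=False)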
| bsd-3-clause |
wmaciel/van-crime | src/run_demo.py | 1 | 4046 | # coding=utf-8
__author__ = 'walthermaciel'
from geopy.geocoders import DataBC
from geopy.exc import GeopyError
from time import sleep
import sys
from ssl import SSLError
from create_feature_vector import create_vector
import os
import pandas as pd
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
crime_id = {0: 'BNE Residential ',
1: 'Theft from Vehicle',
2: 'Other Thefts ',
3: 'Mischief ',
4: 'Theft of Vehicle ',
5: 'BNE Commercial '}
def gather_time():
print 'Year:\t',
year = sys.stdin.readline().strip()
month_ok = False
while not month_ok:
print 'Month:\t',
month = sys.stdin.readline().strip()
if 12 >= int(month) > 0:
month_ok = True
else:
print 'Nice try, champ...'
return int(year), int(month)
def gather_address():
print 'Street Number:\t',
st_num = sys.stdin.readline().strip()
print 'Street Name:\t',
st_name = sys.stdin.readline().strip()
address = st_num + ' ' + st_name + ', Vancouver, BC, Canada'
return address
def gather_lat_long(address):
print 'Researching lat long for ' + address + '...'
got_it = False
delay = 1
while not got_it:
if delay > 10:
print 'could not find address, exiting...'
exit()
try:
sleep(delay)
location = geolocator.geocode(address)
got_it = True
except (GeopyError, SSLError) as e:
delay *= 2
got_it = False
print '!!! Are you sure you got the right address? Trying again...'
print 'Got it!'
latitude = "{:.8f}".format(location.latitude)
longitude = "{:.8f}".format(location.longitude)
print 'LatLong:\t( ' + latitude + ', ' + longitude + ' )'
return location.latitude, location.longitude
def run_demo():
os.system('clear')
print '''
888 888 .d8888b. d8b
888 888 d88P Y88b Y8P
888 888 888 888
Y88b d88P 8888b. 88888b. 888 888d888 888 88888b.d88b. .d88b.
Y88b d88P "88b 888 "88b 888 888P" 888 888 "888 "88b d8P Y8b
Y88o88P .d888888 888 888 888 888 888 888 888 888 888 88888888
Y888P 888 888 888 888 Y88b d88P 888 888 888 888 888 Y8b.
Y8P "Y888888 888 888 "Y8888P" 888 888 888 888 888 "Y8888
------------------ https://github.com/wmaciel/van-crime -----------------
'''
year, month = gather_time()
address = gather_address()
latitude, longitude = gather_lat_long(address)
print 'Generating feature vector...',
f_vec = create_vector(int(year), int(month), latitude, longitude)
if isinstance(f_vec, int):
print 'Failed'
else:
print 'OK'
print 'Loading classification model...',
clf = joblib.load('../models/random_forest_model.p')
print 'OK'
print 'Loading regression model...',
reg = joblib.load('../models/RandomForestRegressor.p')
print 'OK'
print '\n\n----- Results -----'
print 'Probability of crime type, given that a crime happened:'
prob_list = clf.predict_proba(f_vec.as_matrix())[0]
for i, p in enumerate(prob_list):
print crime_id[i] + '\t' + "{:.2f}".format(p * 100) + '%'
print '--------------------------\n'
print 'Expected number of crimes to happen:'
expected = reg.predict(f_vec.as_matrix())[0]
print expected
print '--------------------------\n'
print 'Expected number of crimes to happen by type:'
for i, p in enumerate(prob_list):
print crime_id[i] + '\t' + "{:.2f}".format(p * expected)
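# --- Illustrative sketch (not part of the original demo) ---
# A hedged, non-interactive variant of the pipeline above: build a feature
# vector for a fixed year/month and lat/long and score it with the same
# pre-trained models.  The coordinates below are arbitrary Vancouver values
# chosen purely for illustration.
def run_once(year=2014, month=6, latitude=49.2827, longitude=-123.1207):
    f_vec = create_vector(year, month, latitude, longitude)
    if isinstance(f_vec, int):
        return None  # feature generation failed
    clf = joblib.load('../models/random_forest_model.p')
    reg = joblib.load('../models/RandomForestRegressor.p')
    probs = clf.predict_proba(f_vec.as_matrix())[0]
    expected = reg.predict(f_vec.as_matrix())[0]
    return dict((crime_id[i], p * expected) for i, p in enumerate(probs))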
if __name__ == '__main__':
geolocator = DataBC()
while True:
run_demo()
print '\npress enter to reset'
sys.stdin.readline()
| mit |
DGrady/pandas | pandas/tests/io/formats/test_format.py | 6 | 103145 | # -*- coding: utf-8 -*-
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from __future__ import print_function
import re
import pytz
import dateutil
import itertools
from operator import methodcaller
import os
import sys
import warnings
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from pandas import (DataFrame, Series, Index, Timestamp, MultiIndex,
date_range, NaT, read_table)
from pandas.compat import (range, zip, lrange, StringIO, PY3,
u, lzip, is_platform_windows,
is_platform_32bit)
import pandas.compat as compat
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
import pandas.util.testing as tm
from pandas.io.formats.terminal import get_terminal_size
from pandas.core.config import (set_option, get_option, option_context,
reset_option)
use_32bit_repr = is_platform_windows() or is_platform_32bit()
_frame = DataFrame(tm.getSeriesData())
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split('\n')[0].startswith("<class")
c2 = r.split('\n')[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split('\n')) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == '...')[0][0]
except:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == '...':
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r'^[\.\ ]+$', row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(
df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(
df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split('\n'):
if line.endswith('\\'):
return True
return False
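# --- Illustrative sketch (not part of the original test module) ---
# A hedged example of how the repr-inspection helpers above are meant to be
# used: a frame far wider than ``display.max_columns`` should report a
# horizontally truncated repr (the '...' marker in its header row).
def _sketch_truncated_repr_helpers():
    df = DataFrame(0, index=lrange(5), columns=lrange(50))
    with option_context('display.max_columns', 10):
        return has_horizontally_truncated_repr(df)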
class TestDataFrameFormatting(object):
def setup_method(self, method):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def teardown_method(self, method):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.loc[5] = 0
fmt.set_eng_float_format()
repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ('non-null' in buf.getvalue()) is result
with option_context('display.max_info_rows', 20,
'display.max_info_columns', 20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows', 5,
'display.max_info_columns', 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(
max_len - 1, max_len + 1)) for i in range(10)
]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in lzip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
assert '...' in line
else:
assert '...' not in line
with option_context("display.max_colwidth", 999999):
assert '...' not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert '...' not in repr(df)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
with option_context("display.chop_threshold", 0.2):
assert repr(df) == ' 0 1\n0 0.0 0.5\n1 0.5 0.0'
with option_context("display.chop_threshold", 0.6):
assert repr(df) == ' 0 1\n0 0.0 0.0\n1 0.0 0.0'
with option_context("display.chop_threshold", None):
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40],
[8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 -1.000000e-11\n'
'2 30.0 2.000000e-09\n'
'3 40.0 -2.000000e-11')
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (' 0 1\n'
'0 10.0 0.000000e+00\n'
'1 20.0 0.000000e+00\n'
'2 30.0 0.000000e+00\n'
'3 40.0 0.000000e+00')
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 0.000000e+00\n'
'2 30.0 2.000000e-09\n'
'3 40.0 0.000000e+00')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(lrange(1000))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(lrange(1000))) < 100
def test_repr_set(self):
assert printing.pprint_thing(set([1])) == '{1}'
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather then
# stylized
idx = Index(['a', 'b'])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
assert '\\' not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
df_wide = DataFrame('hello', [0], lrange(10))
df_tall = DataFrame('hello', lrange(30), lrange(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10, 'display.width', 20,
'display.max_rows', 20,
'display.show_dimensions', True):
with option_context('display.expand_frame_repr', True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context('display.expand_frame_repr', False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame('hello', lrange(1000), lrange(5))
with option_context('mode.sim_interactive', False, 'display.width', 0,
'display.max_rows', 5000):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context('display.max_rows', 20,
'display.max_columns', 10):
                    # Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context('display.max_rows', 9,
'display.max_columns', 10):
                    # out of vertical bounds cannot result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context('display.max_columns', 100, 'display.max_rows',
term_width * 20, 'display.width', None):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame([{'a': 'foo',
'b': 'bar',
'c': 'uncomfortably long line with lots of stuff',
'd': 1}, {'a': 'foo',
'b': 'bar',
'c': 'stuff',
'd': 1}])
df.set_index(['a', 'b', 'c'])
assert str(df) == (
' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
assert str(df) == (' a b c d\n'
'0 foo bar uncomfortably lo... 1\n'
'1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
        fac = 1.05  # Arbitrarily large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context('mode.sim_interactive', True):
with option_context('max_rows', None):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context('max_rows', 0):
with option_context('max_columns', 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows', 0):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context('max_rows', None):
with option_context('max_columns', 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = [u('\u03c3')] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except:
pass
if not line.startswith('dtype:'):
assert len(line) == line_len
        # it works even if sys.stdin is None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = self.frame.to_string()
assert isinstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
with option_context('display.max_rows', 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({u('c/\u03c3'): []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x%x' % x),
('float', lambda x: '[% 4.1f]' % x),
('object', lambda x: '-%s-' % str(x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=lzip(*formatters)[1])
assert result == (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-')
assert result == result2
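        # Note: `formatters` accepts either a column -> callable mapping or a
        # positional sequence of callables (one per column); the assertion
        # above verifies that both spellings render identically.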
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({'months': months})
def format_func(x):
return x.strftime('%Y-%m')
result = x.to_string(formatters={'months': format_func})
expected = 'months\n0 2016-01\n1 2016-02'
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')})
def format_func(x):
return x.strftime('%H:%M')
result = x.to_string(formatters={'hod': format_func})
expected = 'hod\n0 10:10\n1 12:12'
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
result = df.to_string(formatters={u('c/\u03c3'): lambda x: '%s' % x})
assert result == u(' c/\u03c3\n') + '0 1\n1 2\n2 3'
def test_east_asian_unicode_frame(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
        # not aligned properly because of east asian width
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
assert _rep(df) == expected
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ")
assert _rep(df) == expected
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'],
name=u'おおおお'))
expected = (u" a b\n"
u"おおおお \n"
u"あ あああああ あ\n"
u"い い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
name=u'お'))
expected = (u" あああ いいいいい\n"
u"お \n"
u"あ あああ あ\n"
u"いいい い いいい\n"
u"うう う う\n"
u"え えええええ ええ")
assert _rep(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=idx)
expected = (u" a b\n"
u"あ いい あああああ あ\n"
u"う え い いいい\n"
u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
assert _rep(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u".. ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
        # Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
assert _rep(df) == expected
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\n"
u"a あああああ あ\n"
u"bb い いいい\n"
u"c う う\n"
u"ddd えええ ええええええ")
assert _rep(df) == expected
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\n"
u"a あ 1\n"
u"bb いいい 222\n"
u"c う 33333\n"
u"ddd ええええええ 4")
assert _rep(df) == expected
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\n"
u"あああ あああああ あ\n"
u"いいいいいい い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'],
name=u'おおおお'))
expected = (u" a b\n"
u"おおおお \n"
u"あ あああああ あ\n"
u"い い いいい\n"
u"うう う う\n"
u"え えええ ええええええ")
assert _rep(df) == expected
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
name=u'お'))
expected = (u" あああ いいいいい\n"
u"お \n"
u"あ あああ あ\n"
u"いいい い いいい\n"
u"うう う う\n"
u"え えええええ ええ")
assert _rep(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=idx)
expected = (u" a b\n"
u"あ いい あああああ あ\n"
u"う え い いいい\n"
u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
assert _rep(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns',
3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n"
u"0 あああああ ... さ\n"
u".. ... ... ...\n"
u"3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\n"
u"あああ あああああ ... さ\n"
u"... ... ... ...\n"
u"aaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
assert _rep(df) == expected
# ambiguous unicode
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'¡¡', u'ええええええ']},
index=['a', 'bb', 'c', '¡¡¡'])
expected = (u" b あああああ\n"
u"a あ 1\n"
u"bb いいい 222\n"
u"c ¡¡ 33333\n"
u"¡¡¡ ええええええ 4")
assert _rep(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({u('c/\u03c3'): Series()})
nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_truncate_indices(self):
for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr",
False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(
df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (
has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,
"display.max_columns", 15):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(
df)
def test_to_string_truncate_multilevel(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series([datetime.datetime(2012, 1, 1)] * 10 +
[datetime.datetime(1012, 1, 2)] + [
datetime.datetime(2012, 1, 3)] * 10)
with pd.option_context('display.max_rows', 8):
result = str(s)
assert 'object' in result
# 12045
df = DataFrame({'text': ['some words'] + [None] * 9})
with pd.option_context('display.max_rows', 8,
'display.max_columns', 3):
result = str(df)
assert 'None' in result
assert 'NaN' not in result
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
{'date': [pd.Timestamp('20130101').tz_localize('UTC')] +
[pd.NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert '2013-01-01 00:00:00+00:00' in result
assert 'NaT' in result
assert '...' in result
assert '[6 rows x 1 columns]' in result
dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00-05:00 1\n'
'1 2011-01-01 00:00:00-05:00 2\n'
'.. ... ..\n'
'8 NaT 9\n'
'9 NaT 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = [pd.NaT] * 5 + [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 NaT 1\n'
'1 NaT 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = ([pd.Timestamp('2011-01-01', tz='Asia/Tokyo')] * 5 +
[pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5)
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00+09:00 1\n'
'1 2011-01-01 00:00:00+09:00 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split('\n')
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({u('c/\u03c3'): Series({'test': np.nan})})
compat.text_type(dm.to_string())
def test_string_repr_encoding(self):
filepath = tm.get_data_path('unicode_series.csv')
df = pd.read_csv(filepath, header=None, encoding='latin1')
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
fmt.set_option('display.max_rows', 200)
def test_pprint_thing(self):
from pandas.io.formats.printing import pprint_thing as pp_t
if PY3:
pytest.skip("doesn't work on Python 3")
assert pp_t('a') == u('a')
assert pp_t(u('a')) == u('a')
assert pp_t(None) == 'None'
assert pp_t(u('\u05d0'), quote_strings=True) == u("u'\u05d0'")
assert pp_t(u('\u05d0'), quote_strings=False) == u('\u05d0')
assert (pp_t((u('\u05d0'), u('\u05d1')), quote_strings=True) ==
u("(u'\u05d0', u'\u05d1')"))
assert (pp_t((u('\u05d0'), (u('\u05d1'), u('\u05d2'))),
quote_strings=True) == u("(u'\u05d0', "
"(u'\u05d1', u'\u05d2'))"))
assert (pp_t(('foo', u('\u05d0'), (u('\u05d0'), u('\u05d0'))),
quote_strings=True) == u("(u'foo', u'\u05d0', "
"(u'\u05d0', u'\u05d0'))"))
# gh-2038: escape embedded tabs in string
assert "\t" not in pp_t("a\tb", escape_chars=("\t", ))
def test_wide_repr(self):
with option_context('mode.sim_interactive', True,
'display.show_dimensions', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
assert "10 rows x %d columns" % (max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(5, 3),
columns=['a' * 90, 'b' * 90, 'c' * 90])
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'DataFrame Index' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
with option_context('mode.sim_interactive', True):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'Level 0 Level 1' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(
tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame({'a': ['a' * 30, 'b' * 30],
'b': ['c' * 70, 'd' * 80]})
result = repr(df)
assert 'ccccc' in result
assert 'ddddd' in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=['s%04d' % x for x in range(n)], dtype='int64')
import re
str_rep = str(s)
nmatches = len(re.findall('dtype', str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame({'id1': {0: '1a3',
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: '78d',
1: '79d'},
'value': {0: 123,
1: 64}})
# multi-index
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# index
y = df.set_index('id2')
result = y.to_string()
expected = u(
' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nd67 9h4 79d 64')
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index('id2')
result = y.to_string()
expected = u(
' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nNaN 9h4 79d 64')
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index(['id2', 'id3'])
result = y.to_string()
expected = u(
' id1 value\nid2 id3 \n'
'NaN 78d 1a3 123\n 79d 9h4 64')
assert result == expected
df = DataFrame({'id1': {0: np.nan,
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: np.nan,
1: '79d'},
'value': {0: 123,
1: 64}})
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
' value\nid1 id2 id3 \n'
'NaN NaN NaN 123\n9h4 d67 79d 64')
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20, 'A'] = np.nan
biggie.loc[:20, 'B'] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, compat.string_types)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
joined = '\n'.join([re.sub(r'\s+', ' ', x).strip() for x in lines[1:]])
recons = read_table(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
assert recons['A'].count() == biggie['A'].count()
assert (np.abs(recons['A'].dropna() -
biggie['A'].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=['A'], col_space=17)
header = result.split('\n')[0].strip().split()
expected = ['A']
assert header == expected
biggie.to_string(columns=['B', 'A'],
formatters={'A': lambda x: '%.1f' % x})
biggie.to_string(columns=['B', 'A'], float_format=str)
biggie.to_string(columns=['B', 'A'], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=['X', 'Y'])
expected = ' X Y\n0 1 4\n1 2 5\n2 3 6'
assert df_s == expected
with pytest.raises(ValueError):
df.to_string(header=['X'])
def test_to_string_no_index(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(index=False)
expected = "x y\n1 4\n2 5\n3 6"
assert df_s == expected
def test_to_string_line_width_no_index(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = "x \\\n1 \n2 \n3 \n\ny \n4 \n5 \n6"
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option('display.precision', 5, 'display.column_space', 12,
'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6, 1.7e+8,
1.253456, np.pi, -1e6]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
'2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
'5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
'8 -1.00000e+006')
else:
expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
'2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
'5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
'8 -1.00000e+06')
assert df_s == expected
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string()
expected = (' x\n' '0 3234.000\n' '1 0.253')
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n'
'0 1.000000e+009\n'
'1 2.512000e-001')
else:
expected = (' x\n'
'0 1.000000e+09\n'
'1 2.512000e-01')
assert df_s == expected
def test_to_string_small_float_values(self):
df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if '%.4g' % 1.7e8 == '1.7e+008':
expected = (' a\n'
'0 1.500000e+000\n'
'1 1.000000e-017\n'
'2 -5.500000e-007')
else:
expected = (' a\n'
'0 1.500000e+00\n'
'1 1.000000e-17\n'
'2 -5.500000e-07')
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = (' 0\n' '0 0\n' '1 0\n' '2 -0')
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(lrange(5), index=index)
result = df.to_string()
expected = (' 0\n'
'1.5 0\n'
'2.0 1\n'
'3.0 2\n'
'4.0 3\n'
'5.0 4')
assert result == expected
def test_to_string_ascii_error(self):
data = [('0 ', u(' .gitignore '), u(' 5 '),
' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
'\xa2\xe2\x80\xa2\xe2\x80\xa2')]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
assert issubclass(df['x'].dtype.type, np.integer)
output = df.to_string()
expected = (' x\n' '0 -15\n' '1 20\n' '2 25\n' '3 -35')
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])
rs = df.to_string(formatters={'__index__': lambda x: 'abc' [x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
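        # The reserved '__index__' key in `formatters` formats the index
        # labels rather than a data column; here it maps row positions
        # 0/1/2 to the letters 'a'/'b'/'c' seen in the expected block above.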
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n' '0 3234.000\n' '1 0.253')
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0000 foo\n'
'2 -2.1234 foooo\n'
'3 3.0000 fooooo\n'
'4 4.0000 bar')
assert result == expected
df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0 foo\n'
'2 -2.0 foooo\n'
'3 3.0 fooooo\n'
'4 4.0 bar')
assert result == expected
def test_to_string_line_width(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
s = df.to_string(line_width=80)
assert max(len(l) for l in s.split('\n')) == 80
def test_show_dimensions(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', True):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', False):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
with option_context('display.max_rows', 2, 'display.max_columns', 2,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
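        # Summary of the cases above: show_dimensions=True always appends the
        # "N rows x M columns" footer, False never does, and 'truncate' adds
        # it only when the repr is actually truncated.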
def test_repr_html(self):
self.frame._repr_html_()
fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
assert '2 rows' in df._repr_html_()
fmt.set_option('display.show_dimensions', False)
assert '2 rows' not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_wide(self):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
reg_repr = df._repr_html_()
assert "..." not in reg_repr
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
wide_repr = wide_df._repr_html_()
assert "..." in wide_repr
def test_repr_html_wide_multiindex_cols(self):
max_cols = get_option('display.max_columns')
mcols = MultiIndex.from_product([np.arange(max_cols // 2),
['foo', 'bar']],
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
reg_repr = df._repr_html_()
assert '...' not in reg_repr
mcols = MultiIndex.from_product((np.arange(1 + (max_cols // 2)),
['foo', 'bar']),
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
wide_repr = df._repr_html_()
assert '...' in wide_repr
def test_repr_html_long(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert '..' in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_float(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(40 + h) in reg_repr
h = max_rows + 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
long_repr = df._repr_html_()
assert '..' in long_repr
assert '31' not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
max_rows = get_option('display.max_rows')
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx,
columns=['A', 'B'])
reg_repr = df._repr_html_()
assert '...' not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn((max_L1 + 1) * 2, 2), index=idx,
columns=['A', 'B'])
long_repr = df._repr_html_()
assert '...' in long_repr
def test_repr_html_long_and_wide(self):
max_cols = get_option('display.max_columns')
max_rows = get_option('display.max_rows')
h, w = max_rows - 1, max_cols - 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert '...' not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert '...' in df._repr_html_()
def test_info_repr(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert has_vertically_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert has_horizontally_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
assert has_non_verbose_info_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 5):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert r'<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert r'<class' in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame(dict((k, np.arange(1, 1 + h)) for k in np.arange(w)))
assert '<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert '<class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self):
def get_ipython():
return {'config': {'KernelApp':
{'parent_appname': 'ipython-qtconsole'}}}
repstr = self.frame._repr_html_()
assert repstr is not None
fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
assert 'class' in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
skip = True
for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert ('+010' in line) or skip
else:
assert ('+10' in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_period(self):
# GH 12615
df = pd.DataFrame({'A': pd.period_range('2013-01',
periods=4, freq='M'),
'B': [pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H'),
pd.Period('2011-04', freq='M')],
'C': list('abcd')})
exp = (" A B C\n0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d")
assert str(df) == exp
def gen_series_formatting():
s1 = pd.Series(['a'] * 100)
s2 = pd.Series(['ab'] * 100)
s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
s4 = s3[::-1]
test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}
return test_sers
class TestSeriesFormatting(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series([u('\u03c3')] * 10)
repr(s)
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == 'Series([], Freq: B)'
result = self.ts[:0].to_string(length=0)
assert result == 'Series([], Freq: B)'
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split('\n')[-1].strip()
assert last_line == ("Freq: B, Name: foo, "
"Length: %d, dtype: float64" % len(cp))
def test_freq_name_separation(self):
s = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
assert 'Freq: D, Name: 0' in result
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = (u('0 foo\n') + u('1 NaN\n') + u('2 -1.23\n') +
u('3 4.56'))
assert result == expected
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') + '1 NaN\n' + '2 bar\n' + '3 baz')
assert result == expected
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') + '1 5\n' + '2 bar\n' + '3 baz')
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = (u('0 NaN\n') + '1 1.5678\n' + '2 NaN\n' +
'3 -3.0000\n' + '4 NaN')
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = (u('1\n') + '2\n' + '3\n' + '4')
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
# not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
assert _rep(s) == expected
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
assert _rep(s) == expected
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
assert _rep(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\n"
u"う え 22\n"
u"おおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\n"
u"AB 22\n"
u"2011-01-01 00:00:00 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
        # Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
assert _rep(s) == expected
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
assert _rep(s) == expected
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\n"
u"いいいい いい\n"
u"う ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\n"
u"いいいい いい\n"
u"う ううう\n"
u"えええ ええええ\n"
u"Name: おおおおおおお, dtype: object")
assert _rep(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\n"
u"う え 22\n"
u"おおお かかかか 3333\n"
u"き くく 44444\n"
u"dtype: int64")
assert _rep(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\n"
u"AB 22\n"
u"2011-01-01 00:00:00 3333\n"
u"あああ 44444\ndtype: int64")
assert _rep(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n"
u" ... \n"
u"えええ ええええ\n"
u"Name: おおおおおおお, Length: 4, dtype: object")
assert _rep(s) == expected
# ambiguous unicode
s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
expected = (u"ああ ¡¡\n"
u"¡¡¡¡いい い¡¡\n"
u"¡¡ ううう\n"
u"えええ ええええ\ndtype: object")
assert _rep(s) == expected
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
for line in repr(Series(vals)).split('\n'):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert '+010' in line
else:
assert '+10' in line
def test_datetimeindex(self):
index = date_range('20130102', periods=6)
s = Series(1, index=index)
result = s.to_string()
assert '2013-01-02' in result
# nat in index
s2 = Series(2, index=[Timestamp('20130111'), NaT])
s = s2.append(s)
result = s.to_string()
assert 'NaT' in result
# nat in summary
result = str(s2.index)
assert 'NaT' in result
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert '1 days' in result
assert '00:00:00' not in result
assert 'NaT' in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:59:59.999850' in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:00:00' in result
assert '1 days 23:00:00' in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:59:00' in result
assert '1 days 22:59:00' in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:58:59.999850' in result
assert '0 days 22:58:59.999850' in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
assert '-1 days +23:54:57' in result
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
assert '2012-01-01 23:59:59.999450' in result
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days', periods=3))
result = td.to_string()
assert result == u("0 1 days\n1 2 days\n2 3 days")
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2], 'B': ['2012-01-01', '2012-01-02']})
df['B'] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert '2012-01-01' in result
def test_period(self):
# GH 12615
index = pd.period_range('2013-01', periods=6, freq='M')
s = Series(np.arange(6, dtype='int64'), index=index)
exp = ("2013-01 0\n2013-02 1\n2013-03 2\n2013-04 3\n"
"2013-05 4\n2013-06 5\nFreq: M, dtype: int64")
assert str(s) == exp
s = Series(index)
exp = ("0 2013-01\n1 2013-02\n2 2013-03\n3 2013-04\n"
"4 2013-05\n5 2013-06\ndtype: object")
assert str(s) == exp
# periods with mixed freq
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H')])
exp = ("0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object")
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10,
"display.show_dimensions", False):
res = repr(s)
exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
'1.0000\n4 1.0000\n ... \n125 '
'1.0000\n126 1.0000\n127 0.9999\n128 '
'1.0000\n129 1.0000\ndtype: float64')
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split('\n')
lines = [line for line in repr(s).split('\n')
if not re.match(r'[^\.]*\.+', line)][:-1]
ncolsizes = len(set(len(line.strip()) for line in lines))
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
res = repr(test_sers['onel'])
exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
assert exp == res
res = repr(test_sers['twol'])
exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
' object')
assert exp == res
res = repr(test_sers['asc'])
exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
' abcdef\ndtype: object')
assert exp == res
res = repr(test_sers['desc'])
exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
' a\ndtype: object')
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype='int64')
with option_context("display.max_rows", 1):
strrepr = repr(s).split('\n')
exp1 = ['0', '0']
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = ['..']
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert 'Length' not in repr(s)
with option_context("display.max_rows", 4):
assert 'Length' in repr(s)
with option_context("display.show_dimensions", True):
assert 'Length' in repr(s)
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
assert 'Length' not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype='int64')
s.name = 'myser'
res = s.to_string(max_rows=2, name=True)
exp = '0 0\n ..\n99 99\nName: myser'
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, dtype=True)
exp = '0 0\n ..\n99 99\ndtype: int64'
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, length=True)
exp = '0 0\n ..\n99 99\nLength: 100'
assert res == exp
def test_to_string_na_rep(self):
s = pd.Series(index=range(100))
res = s.to_string(na_rep='foo', max_rows=2)
exp = '0 foo\n ..\n99 foo'
assert res == exp
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype='float64')
res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
max_rows=2)
exp = '0 0.0\n ..\n9 9.0'
assert res == exp
def test_to_string_header(self):
s = pd.Series(range(10), dtype='int64')
s.index.name = 'foo'
res = s.to_string(header=True, max_rows=2)
exp = 'foo\n0 0\n ..\n9 9'
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = '0 0\n ..\n9 9'
assert res == exp
def _three_digit_exp():
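    # Probe for platforms (notably older Microsoft C runtimes) that print
    # three-digit exponents, e.g. '1.7e+008' instead of '1.7e+08'; several
    # expected strings in this module branch on the result.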
return '%.4g' % 1.7e8 == '1.7e+008'
class TestFloatArrayFormatter(object):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
d = pd.DataFrame(
{'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7,
5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6,
4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
expected_output = {
(0, 6):
' col1\n'
'0 9.999000e-08\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 6):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 8):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07\n'
'6 5.000100e-07\n'
'7 6.000000e-07',
(8, 16):
' col1\n'
'8 9.999000e-07\n'
'9 1.000000e-06\n'
'10 1.000100e-06\n'
'11 2.000000e-06\n'
'12 4.999000e-06\n'
'13 5.000000e-06\n'
'14 5.000100e-06\n'
'15 6.000000e-06',
(9, 16):
' col1\n'
'9 0.000001\n'
'10 0.000001\n'
'11 0.000002\n'
'12 0.000005\n'
'13 0.000005\n'
'14 0.000005\n'
'15 0.000006'
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with pd.option_context('display.precision', 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
assert str(df) == ' x\n0 12345.6789'
df = pd.DataFrame(dict(x=[2e6]))
assert str(df) == ' x\n0 2000000.0'
df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
assert str(df) == ' x\n0 1.2346e+04\n1 2.0000e+06'
class TestRepr_timedelta64(object):
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
def test_even_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='even_day')
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='sub_day')
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='long')
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1ns = pd.to_timedelta(1, unit='ns')
drepr = lambda x: x._repr_base(format='all')
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
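# The tests above exercise Timedelta._repr_base in its default form and with
# format='even_day', 'sub_day', 'long' and 'all', which control how much of
# the day, sub-day and nanosecond detail is rendered.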
class TestTimedelta64Formatter(object):
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter(object):
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
        # make sure that we consistently display date formatting
x = Series(date_range('20130101 09:00:00', periods=5, freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range('20130101 09:00:00', periods=5, freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range('20130101 09:00:00', periods=5, freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime('%Y-%m')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['2016-01', '2016-02']
def test_datetime64formatter_hoursecond(self):
x = Series(pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f'))
def format_func(x):
return x.strftime('%H:%M')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['10:10', '12:12']
class TestNaTFormatting(object):
def test_repr(self):
assert repr(pd.NaT) == "NaT"
def test_str(self):
assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat(object):
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime(
[datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
    def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT")
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode(object):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(
2014, 1, 1, 12), datetime(2014, 1, 1)]))
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp(object):
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller('isoformat')):
assert f(pd.NaT) == 'NaT'
def test_format_percentiles():
result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
assert result == expected
result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
assert result == expected
pytest.raises(ValueError, fmt.format_percentiles, [0.1, np.nan, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [-0.001, 0.1, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [2, 0.1, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [0.1, 0.5, 'a'])
| bsd-3-clause |
procoder317/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/regression/_prediction.py | 27 | 6035 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid,
df=None, dist=None, row_labels=None):
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
def conf_int(self, obs=False, alpha=0.05):
"""
Returns the confidence interval of the value, `effect` of the constraint.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
se = self.se_obs if obs else self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
return np.column_stack((lower, upper))
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha, obs=False)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
to_include['obs_ci_lower'] = ci_obs[:, 0]
to_include['obs_ci_upper'] = ci_obs[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
pred_kwds : dict, optional
Additional keyword arguments passed to the model's predict method; see
the predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels)
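# A minimal usage sketch of the prediction machinery above; it assumes an OLS
# results instance from statsmodels.api, whose get_prediction returns the
# PredictionResults class defined in this module. Data and variable names are
# illustrative only.
if __name__ == '__main__':
    import statsmodels.api as sm
    nobs = 50
    x = np.column_stack((np.ones(nobs), np.linspace(0, 10, nobs)))
    y = x.dot([1., 0.5]) + np.random.standard_normal(nobs)
    res = sm.OLS(y, x).fit()
    pred = res.get_prediction(x[:5])
    # confidence interval for the mean vs. prediction interval for new obs
    print(pred.conf_int(obs=False, alpha=0.05))
    print(pred.conf_int(obs=True, alpha=0.05))
    # DataFrame with the mean, its standard error and both interval types
    print(pred.summary_frame(alpha=0.05))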
| bsd-3-clause |
albertbup/deep-belief-network | example_classification.py | 3 | 1158 | import numpy as np
np.random.seed(1337) # for reproducibility
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics.classification import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification
# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target
# Data scaling
X = (X / 16).astype(np.float32)
# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
learning_rate_rbm=0.05,
learning_rate=0.1,
n_epochs_rbm=10,
n_iter_backprop=100,
batch_size=32,
activation_function='relu',
dropout_p=0.2)
classifier.fit(X_train, Y_train)
# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
| mit |
maxlikely/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 6 | 7671 | import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_multilabel_classification():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_equal(X[:, 0], t * np.cos(t))
assert_array_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_equal(X[:, 0], np.sin(t))
assert_array_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
| bsd-3-clause |
ilo10/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 25 | 16780 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(self, _dnn_classifier_fn,
_dnn_regressor_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
FederatedAI/FATE | python/federatedml/evaluation/metrics/classification_metric.py | 1 | 21454 | import copy
import sys
import numpy as np
import pandas as pd
from scipy.stats import stats
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score
ROUND_NUM = 6
def neg_pos_count(labels: np.ndarray, pos_label: int):
pos_num = ((labels == pos_label) + 0).sum()
neg_num = len(labels) - pos_num
return pos_num, neg_num
def sort_score_and_label(labels: np.ndarray, pred_scores: np.ndarray):
labels = np.array(labels)
pred_scores = np.array(pred_scores)
sort_idx = np.flip(pred_scores.argsort())
sorted_labels = labels[sort_idx]
sorted_scores = pred_scores[sort_idx]
return sorted_labels, sorted_scores
class ConfusionMatrix(object):
@staticmethod
def compute(sorted_labels: list, sorted_pred_scores: list, score_thresholds: list, ret: list, pos_label=1):
for ret_type in ret:
assert ret_type in ['tp', 'tn', 'fp', 'fn']
sorted_labels = np.array(sorted_labels)
sorted_scores = np.array(sorted_pred_scores)
sorted_labels[sorted_labels != pos_label] = 0
sorted_labels[sorted_labels == pos_label] = 1
score_thresholds = np.array([score_thresholds]).transpose()
pred_labels = (sorted_scores > score_thresholds) + 0
ret_dict = {}
if 'tp' in ret or 'tn' in ret:
match_arr = (pred_labels + sorted_labels)
if 'tp' in ret:
tp_num = (match_arr == 2).sum(axis=-1)
ret_dict['tp'] = tp_num
if 'tn' in ret:
tn_num = (match_arr == 0).sum(axis=-1)
ret_dict['tn'] = tn_num
if 'fp' in ret or 'fn' in ret:
match_arr = (sorted_labels - pred_labels)
if 'fp' in ret:
fp_num = (match_arr == -1).sum(axis=-1)
ret_dict['fp'] = fp_num
if 'fn' in ret:
fn_num = (match_arr == 1).sum(axis=-1)
ret_dict['fn'] = fn_num
return ret_dict
class ThresholdCutter(object):
@staticmethod
def cut_by_step(sorted_scores, steps=0.01):
assert isinstance(steps, float) and (0 < steps < 1)
thresholds = list(set(sorted_scores))
thresholds, cuts = ThresholdCutter.__filt_threshold(thresholds, 0.01)
score_threshold = thresholds
return score_threshold, cuts
@staticmethod
def cut_by_index(sorted_scores):
cuts = np.array([c / 100 for c in range(100)])
data_size = len(sorted_scores)
indexs = [int(data_size * cut) for cut in cuts]
score_threshold = [sorted_scores[idx] for idx in indexs]
return score_threshold, cuts
@staticmethod
def __filt_threshold(thresholds, step):
cuts = list(map(float, np.arange(0, 1, step)))
size = len(list(thresholds))
thresholds.sort(reverse=True)
index_list = [int(size * cut) for cut in cuts]
new_thresholds = [thresholds[idx] for idx in index_list]
return new_thresholds, cuts
@staticmethod
def cut_by_quantile(scores, quantile_list=None, interpolation='nearest', remove_duplicate=True):
if quantile_list is None: # default is 20 intervals
quantile_list = [round(i * 0.05, 3) for i in range(20)] + [1.0]
quantile_val = np.quantile(scores, quantile_list, interpolation=interpolation)
if remove_duplicate:
quantile_val = sorted(list(set(quantile_val)))
else:
quantile_val = sorted(list(quantile_val))
if len(quantile_val) == 1:
quantile_val = [np.min(scores), np.max(scores)]
return quantile_val
class KS(object):
@staticmethod
def compute(labels, pred_scores, pos_label=1):
sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)
score_threshold, cuts = ThresholdCutter.cut_by_index(sorted_scores)
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold, ret=['tp', 'fp'],
pos_label=pos_label)
pos_num, neg_num = neg_pos_count(sorted_labels, pos_label=pos_label)
assert pos_num > 0 and neg_num > 0, "error when computing KS metric, pos sample number and neg sample number " \
"must be larger than 0"
tpr_arr = confusion_mat['tp'] / pos_num
fpr_arr = confusion_mat['fp'] / neg_num
tpr = np.append(tpr_arr, np.array([1.0]))
fpr = np.append(fpr_arr, np.array([1.0]))
cuts = np.append(cuts, np.array([1.0]))
ks_curve = tpr[:-1] - fpr[:-1]
ks_val = np.max(ks_curve)
return ks_val, fpr, tpr, score_threshold, cuts
class BiClassMetric(object):
def __init__(self, cut_method='step', remove_duplicate=False, pos_label=1):
assert cut_method in ['step', 'quantile']
self.cut_method = cut_method
self.remove_duplicate = remove_duplicate # available when cut_method is quantile
self.pos_label = pos_label
def prepare_confusion_mat(self, labels, scores, add_to_end=True, ):
sorted_labels, sorted_scores = sort_score_and_label(labels, scores)
score_threshold, cuts = None, None
if self.cut_method == 'step':
score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)
if add_to_end:
score_threshold.append(min(score_threshold) - 0.001)
cuts.append(1)
elif self.cut_method == 'quantile':
score_threshold = ThresholdCutter.cut_by_quantile(sorted_scores, remove_duplicate=self.remove_duplicate)
score_threshold = list(np.flip(score_threshold))
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold,
ret=['tp', 'fp', 'fn', 'tn'], pos_label=self.pos_label)
return confusion_mat, score_threshold, cuts
def compute(self, labels, scores, ):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores, )
metric_scores = self.compute_metric_from_confusion_mat(confusion_mat)
return list(metric_scores), score_threshold, cuts
def compute_metric_from_confusion_mat(self, *args):
raise NotImplementedError()
class Lift(BiClassMetric):
"""
Compute lift
"""
@staticmethod
def _lift_helper(val):
tp, fp, fn, tn, labels_num = val[0], val[1], val[2], val[3], val[4]
lift_x_type, lift_y_type = [], []
for label_type in ['1', '0']:
if label_type == '0':
tp, tn = tn, tp
fp, fn = fn, fp
if labels_num == 0:
lift_x = 1
denominator = 1
else:
lift_x = (tp + fp) / labels_num
denominator = (tp + fn) / labels_num
if tp + fp == 0:
numerator = 1
else:
numerator = tp / (tp + fp)
if denominator == 0:
lift_y = sys.float_info.max
else:
lift_y = numerator / denominator
lift_x_type.insert(0, lift_x)
lift_y_type.insert(0, lift_y)
return lift_x_type, lift_y_type
def compute(self, labels, pred_scores, pos_label=1):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )
lifts_y, lifts_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels), )
return lifts_y, lifts_x, list(score_threshold)
def compute_metric_from_confusion_mat(self, confusion_mat, labels_len, ):
labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len
rs = map(self._lift_helper, zip(confusion_mat['tp'], confusion_mat['fp'],
confusion_mat['fn'], confusion_mat['tn'], labels_nums))
rs = list(rs)
lifts_x, lifts_y = [i[0] for i in rs], [i[1] for i in rs]
return lifts_y, lifts_x
class Gain(BiClassMetric):
"""
Compute Gain
"""
@staticmethod
def _gain_helper(val):
tp, fp, fn, tn, num_label = val[0], val[1], val[2], val[3], val[4]
gain_x_type, gain_y_type = [], []
for pos_label in ['1', '0']:
if pos_label == '0':
tp, tn = tn, tp
fp, fn = fn, fp
if num_label == 0:
gain_x = 1
else:
gain_x = float((tp + fp) / num_label)
num_positives = tp + fn
if num_positives == 0:
gain_y = 1
else:
gain_y = float(tp / num_positives)
gain_x_type.insert(0, gain_x)
gain_y_type.insert(0, gain_y)
return gain_x_type, gain_y_type
def compute(self, labels, pred_scores, pos_label=1):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )
gain_y, gain_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels))
return gain_y, gain_x, list(score_threshold)
def compute_metric_from_confusion_mat(self, confusion_mat, labels_len):
labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len
rs = map(self._gain_helper, zip(confusion_mat['tp'], confusion_mat['fp'],
confusion_mat['fn'], confusion_mat['tn'], labels_nums))
rs = list(rs)
gain_x, gain_y = [i[0] for i in rs], [i[1] for i in rs]
return gain_y, gain_x
class BiClassPrecision(BiClassMetric):
"""
Compute binary classification precision
"""
def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True, impute_val=1.0):
numerator = confusion_mat['tp']
denominator = (confusion_mat['tp'] + confusion_mat['fp'])
zero_indexes = (denominator == 0)
denominator[zero_indexes] = 1
precision_scores = numerator / denominator
precision_scores[zero_indexes] = impute_val # impute_val is for prettifying when drawing pr curves
if formatted:
score_formatted = [[0, i] for i in precision_scores]
return score_formatted
else:
return precision_scores
class MultiClassPrecision(object):
"""
Compute multi-classification precision
"""
def compute(self, labels, pred_scores):
all_labels = list(set(labels).union(set(pred_scores)))
all_labels.sort()
return precision_score(labels, pred_scores, average=None), all_labels
class BiClassRecall(BiClassMetric):
"""
Compute binary classification recall
"""
def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True):
recall_scores = confusion_mat['tp'] / (confusion_mat['tp'] + confusion_mat['fn'])
if formatted:
score_formatted = [[0, i] for i in recall_scores]
return score_formatted
else:
return recall_scores
class MultiClassRecall(object):
"""
Compute multi-classification recall
"""
def compute(self, labels, pred_scores):
all_labels = list(set(labels).union(set(pred_scores)))
all_labels.sort()
return recall_score(labels, pred_scores, average=None), all_labels
class BiClassAccuracy(BiClassMetric):
"""
Compute binary classification accuracy
"""
def compute(self, labels, scores, normalize=True):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores)
metric_scores = self.compute_metric_from_confusion_mat(confusion_mat, normalize=normalize)
return list(metric_scores), score_threshold[: len(metric_scores)], cuts[: len(metric_scores)]
def compute_metric_from_confusion_mat(self, confusion_mat, normalize=True):
rs = (confusion_mat['tp'] + confusion_mat['tn']) / \
(confusion_mat['tp'] + confusion_mat['tn'] + confusion_mat['fn'] + confusion_mat['fp']) if normalize \
else (confusion_mat['tp'] + confusion_mat['tn'])
return rs[:-1]
class MultiClassAccuracy(object):
"""
Compute multi-classification accuracy
"""
def compute(self, labels, pred_scores, normalize=True):
return accuracy_score(labels, pred_scores, normalize)
class FScore(object):
"""
Compute F score from bi-class confusion mat
"""
@staticmethod
def compute(labels, pred_scores, beta=1, pos_label=1):
sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)
score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)
score_threshold.append(0)
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores,
score_threshold,
ret=['tp', 'fp', 'fn', 'tn'], pos_label=pos_label)
precision_computer = BiClassPrecision()
recall_computer = BiClassRecall()
p_score = precision_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)
r_score = recall_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)
beta_2 = beta * beta
denominator = (beta_2 * p_score + r_score)
denominator[denominator == 0] = 1e-6 # in case denominator is 0
numerator = (1 + beta_2) * (p_score * r_score)
f_score = numerator / denominator
return f_score, score_threshold, cuts
class PSI(object):
def compute(self, train_scores: list, validate_scores: list, train_labels=None, validate_labels=None,
debug=False, str_intervals=False, round_num=3, pos_label=1):
"""
train/validate scores: predicted scores on train/validate set
train/validate labels: true labels
debug: print debug message
if train&validate labels are not None, count positive sample percentage in every interval
pos_label: pos label
round_num: round number
str_intervals: return str intervals
"""
train_scores = np.array(train_scores)
validate_scores = np.array(validate_scores)
quantile_points = ThresholdCutter().cut_by_quantile(train_scores)
train_count = self.quantile_binning_and_count(train_scores, quantile_points)
validate_count = self.quantile_binning_and_count(validate_scores, quantile_points)
train_pos_perc, validate_pos_perc = None, None
if train_labels is not None and validate_labels is not None:
assert len(train_labels) == len(train_scores) and len(validate_labels) == len(validate_scores)
train_labels, validate_labels = np.array(train_labels), np.array(validate_labels)
train_pos_count = self.quantile_binning_and_count(train_scores[train_labels == pos_label], quantile_points)
validate_pos_count = self.quantile_binning_and_count(validate_scores[validate_labels == pos_label],
quantile_points)
train_pos_perc = np.array(train_pos_count['count']) / np.array(train_count['count'])
validate_pos_perc = np.array(validate_pos_count['count']) / np.array(validate_count['count'])
# handle special cases
train_pos_perc[train_pos_perc == np.inf] = -1
validate_pos_perc[validate_pos_perc == np.inf] = -1
train_pos_perc[np.isnan(train_pos_perc)] = 0
validate_pos_perc[np.isnan(validate_pos_perc)] = 0
if debug:
print(train_count)
print(validate_count)
assert (train_count['interval'] == validate_count['interval']), 'train count interval is not equal to ' \
'validate count interval'
expected_interval = np.array(train_count['count'])
actual_interval = np.array(validate_count['count'])
expected_interval = expected_interval.astype(np.float)
actual_interval = actual_interval.astype(np.float)
psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage \
= self.psi_score(expected_interval, actual_interval, len(train_scores), len(validate_scores))
intervals = train_count['interval'] if not str_intervals else PSI.intervals_to_str(train_count['interval'],
round_num=round_num)
if train_labels is None and validate_labels is None:
return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
intervals
else:
return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
train_pos_perc, validate_pos_perc, intervals
@staticmethod
def quantile_binning_and_count(scores, quantile_points):
"""
left edge and right edge of last interval are closed
"""
assert len(quantile_points) >= 2
left_bounds = copy.deepcopy(quantile_points[:-1])
right_bounds = copy.deepcopy(quantile_points[1:])
last_interval_left = left_bounds.pop()
last_interval_right = right_bounds.pop()
bin_result_1, bin_result_2 = None, None
if len(left_bounds) != 0 and len(right_bounds) != 0:
bin_result_1 = pd.cut(scores, pd.IntervalIndex.from_arrays(left_bounds, right_bounds, closed='left'))
bin_result_2 = pd.cut(scores, pd.IntervalIndex.from_arrays([last_interval_left], [last_interval_right],
closed='both'))
count1 = None if bin_result_1 is None else bin_result_1.value_counts().reset_index()
count2 = bin_result_2.value_counts().reset_index()
# if predict scores are the same, count1 will be None, only one interval exists
final_interval = list(count1['index']) + list(count2['index']) if count1 is not None else list(count2['index'])
final_count = list(count1[0]) + list(count2[0]) if count1 is not None else list(count2[0])
rs = {'interval': final_interval, 'count': final_count}
return rs
@staticmethod
def interval_psi_score(val):
expected, actual = val[0], val[1]
return (actual - expected) * np.log(actual / expected)
@staticmethod
def intervals_to_str(intervals, round_num=3):
str_intervals = []
for interval in intervals:
left_bound, right_bound = '[', ']'
if interval.closed == 'left':
right_bound = ')'
elif interval.closed == 'right':
left_bound = '('
str_intervals.append("{}{}, {}{}".format(left_bound, round(interval.left, round_num),
round(interval.right, round_num), right_bound))
return str_intervals
@staticmethod
def psi_score(expected_interval: np.ndarray, actual_interval: np.ndarray, expect_total_num, actual_total_num,
debug=False):
expected_interval[expected_interval == 0] = 1e-6 # in case no overlap samples
actual_interval[actual_interval == 0] = 1e-6 # in case no overlap samples
expected_percentage = expected_interval / expect_total_num
actual_percentage = actual_interval / actual_total_num
if debug:
print(expected_interval)
print(actual_interval)
print(expected_percentage)
print(actual_percentage)
psi_scores = list(map(PSI.interval_psi_score, zip(expected_percentage, actual_percentage)))
psi_scores = np.array(psi_scores)
total_psi = psi_scores.sum()
return psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage
class KSTest(object):
@staticmethod
def compute(train_scores, validate_scores):
"""
train/validate scores: predicted scores on train/validate set
"""
return stats.ks_2samp(train_scores, validate_scores).pvalue
class AveragePrecisionScore(object):
@staticmethod
def compute(train_scores, validate_scores, train_labels, validate_labels):
"""
train/validate scores: predicted scores on train/validate set
train/validate labels: true labels
"""
train_mAP = average_precision_score(train_labels, train_scores)
validate_mAP = average_precision_score(validate_labels, validate_scores)
return abs(train_mAP - validate_mAP)
class Distribution(object):
@staticmethod
def compute(train_scores: list, validate_scores: list):
"""
train/validate scores: predicted scores on train/validate set
"""
train_scores = np.array(train_scores)
validate_scores = np.array(validate_scores)
validate_scores = dict(validate_scores)
count = 0
for key, value in train_scores:
if key in validate_scores.keys() and value != validate_scores.get(key):
count += 1
return count / len(train_scores)
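# A minimal usage sketch for the metrics defined above; the labels and scores
# below are synthetic and purely illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    labels = rng.randint(0, 2, 1000)
    scores = np.clip(labels * 0.3 + rng.rand(1000) * 0.7, 0, 1)
    ks_val, fpr, tpr, thresholds, cuts = KS.compute(labels, scores, pos_label=1)
    print('KS:', ks_val)
    # PSI compares the score distribution on two data sets, e.g. train vs validate
    validate_scores = np.clip(scores + rng.normal(0, 0.05, 1000), 0, 1)
    psi_rs = PSI().compute(list(scores), list(validate_scores))
    print('total PSI:', psi_rs[1])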
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/pydruid-0.5.11/setup.py | 2 | 2017 | import io
import sys
from setuptools import find_packages, setup
install_requires = ["six >= 1.9.0", "requests"]
extras_require = {
"pandas": ["pandas<1.0.0"],
"async": ["tornado"],
"sqlalchemy": ["sqlalchemy"],
"cli": ["pygments", "prompt_toolkit<2.0.0", "tabulate"],
}
# only require simplejson on python < 2.6
if sys.version_info < (2, 6):
install_requires.append("simplejson >= 3.3.0")
with io.open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="pydruid",
version="0.5.11",
author="Druid Developers",
author_email="[email protected]",
packages=find_packages(where='pydruid'),
package_dir={
'': 'pydruid',
},
url="https://druid.apache.org",
project_urls={
"Bug Tracker": "https://github.com/druid-io/pydruid/issues",
"Documentation": "https://pythonhosted.org/pydruid/",
"Source Code": "https://github.com/druid-io/pydruid",
},
license="Apache License, Version 2.0",
description="A Python connector for Druid.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=install_requires,
extras_require=extras_require,
tests_require=["pytest", "six", "mock"],
entry_points={
"console_scripts": ["pydruid = pydruid.console:main"],
"sqlalchemy.dialects": [
"druid = pydruid.db.sqlalchemy:DruidHTTPDialect",
"druid.http = pydruid.db.sqlalchemy:DruidHTTPDialect",
"druid.https = pydruid.db.sqlalchemy:DruidHTTPSDialect",
],
},
include_package_data=True,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
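# A minimal connection sketch for the SQLAlchemy dialects registered above
# (requires the "sqlalchemy" extra); the host, port and path are illustrative
# and depend on the Druid deployment:
#
#     from sqlalchemy import create_engine
#     engine = create_engine('druid://localhost:8082/druid/v2/sql/')
#     rows = engine.execute('SELECT COUNT(*) FROM my_datasource').fetchall()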
| apache-2.0 |
appapantula/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
maxalbert/bokeh | bokeh/sampledata/periodic_table.py | 45 | 1542 | '''
This module provides the periodic table as a data set. It exposes an attribute 'elements'
which is a pandas dataframe with the following fields
    elements['atomic Number'] (dimensionless)
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
    elements['ionization energy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metal
'''
from __future__ import absolute_import
from os.path import dirname, join
try:
import pandas as pd
except ImportError as e:
raise RuntimeError("elements data requires pandas (http://pandas.pydata.org) to be installed")
elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
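# Hedged usage sketch (not part of the original module): a quick illustration of
# querying the dataframe loaded above. The column names ('type', 'symbol',
# 'name', 'melting point') are taken from the docstring; verify them against the
# CSV header if they differ.
if __name__ == '__main__':
    noble_gases = elements[elements['type'] == 'noble gas']
    print(noble_gases[['symbol', 'name', 'melting point']].head())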
| bsd-3-clause |
kabrapratik28/DeepNews | statistics_calculator/word2vec_stats.py | 1 | 1076 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 01:19:44 2017
@author: student
"""
import codecs
import os
import numpy as np
import matplotlib.pyplot as plt
#def getstats(temp_results='../../temp_results',raw_file_name='annotated_news_text.txt',n_words=20):
temp_results='../../'
raw_file_name='news_data.txt'
file_path = os.path.join(temp_results,raw_file_name)
textfile = codecs.open(file_path, "r", "utf-8")
article_word_freq = {}
#headline_word_freq ={}
article_len = 0
count_articles=0
#lines = [line for line in textfile]
#first_lines = lines[0:5]
for line in textfile:
tokens = line.split(' ')
for words in tokens:
if words in article_word_freq:
article_word_freq[words]+=1
else:
article_word_freq[words]=1
article_len += len(tokens)
count_articles+=1
article_len/=count_articles
print "Total Unique Tokens in Articles: "+ str(len(article_word_freq))
print "Average Length of Article "+ str(article_len)
| apache-2.0 |
wavelets/machine-learning | code/utils.py | 1 | 4217 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] [email protected] $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
    return wrapper
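# Hedged usage sketch (not part of the original module): because the decorator
# returns (result, elapsed_seconds), callers unpack two values. The _timed_sum
# helper below is invented purely for illustration and is never called elsewhere.
@timeit
def _timed_sum(values):
    # trivial workload so the wrapper has something to measure
    return sum(values)
# example: result, elapsed = _timed_sum(range(1000))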
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_data(path, descr=None, target_index=-1):
"""
    Returns a scikit-learn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
Pass in a path usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- README.md # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
- dataset.csv # The pandas read_csv file
You can specify another descr, another feature_names, and whether or
not the dataset has a header row. You can also specify the index of the
target, which by default is the last item in the row (-1)
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'README.md'),
'data': os.path.join(root, 'dataset.txt'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
dataset = np.loadtxt(filenames['data'])
data = None
target = None
# Target assumed to be either last or first row
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_wheat():
return load_data('wheat')
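# Hedged usage sketch (not part of the original module): load_data expects the
# layout described in its docstring under get_data_home()/<path>. A minimal
# meta.json for a hypothetical dataset might look like
#
#     {"feature_names": ["area", "perimeter"], "target_names": ["kama", "rosa"]}
#
# after which the returned Bunch is consumed like the scikit-learn toy datasets:
#
#     dataset = load_data('wheat')
#     X, y = dataset.data, dataset.target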
| mit |
fredhusser/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
trovdimi/wikilinks | weighted_pagerank.py | 1 | 33021 | from wsd.database import MySQLDatabase
from graph_tool.all import *
from conf import *
import logging
import MySQLdb
import cPickle as pickle
from scipy.stats.stats import pearsonr,spearmanr,kendalltau
import numpy as np
import pandas as pd
from scipy.sparse import sparsetools
from joblib import Parallel, delayed
from scipy.sparse import csr_matrix
from scipy.sparse.sparsetools import csr_scale_rows
def read_pickle(fpath):
with open(fpath, 'rb') as infile:
obj = pickle.load(infile)
return obj
def write_pickle(fpath, obj):
with open(fpath, 'wb') as outfile:
pickle.dump(obj, outfile, -1)
def weighted_pagerank():
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
cursor = conn.cursor()
cursor.execute('SELECT source_article_id, target_article_id, occ FROM link_occurences;')
result = cursor.fetchall()
wikipedia = Graph()
eprop = wikipedia.new_edge_property("int")
for link in result:
e = wikipedia.add_edge(link[0], link[1])
eprop[e] = link[2]
# filter all nodes that have no edges
wikipedia = GraphView(wikipedia, vfilt=lambda v : v.out_degree()+v.in_degree()>0 )
print "page_rank_weighted"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank_weighted"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop,damping=damping)
print "page_rank"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, damping=damping)
wikipedia.save("output/weightedpagerank/wikipedianetwork_link_occ.xml.gz")
print 'link_occ done'
cursor.execute('SELECT source_article_id, target_article_id, sim FROM semantic_similarity group by '
'source_article_id, target_article_id;')
result = cursor.fetchall()
wikipedia = Graph()
eprop = wikipedia.new_edge_property("double")
for link in result:
e = wikipedia.add_edge(link[0], link[1])
eprop[e] = link[2]
# filter all nodes that have no edges
print 'filter nodes graph tool specific code'
wikipedia = GraphView(wikipedia, vfilt=lambda v : v.out_degree()+v.in_degree()>0 )
print "page_rank_weighted"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank_weighted"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop,damping=damping)
print "page_rank"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, damping=damping)
wikipedia.save("output/weightedpagerank/wikipedianetwork_sem_sim_distinct_links.xml.gz")
print 'sem sim distrinct links done'
cursor.execute('SELECT source_article_id, target_article_id, sim FROM semantic_similarity;')
result = cursor.fetchall()
wikipedia = Graph()
eprop = wikipedia.new_edge_property("double")
for link in result:
e = wikipedia.add_edge(link[0], link[1])
eprop[e] = link[2]
# filter all nodes that have no edges
wikipedia = GraphView(wikipedia, vfilt=lambda v : v.out_degree()+v.in_degree()>0 )
print "page_rank_weighted"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank_weighted"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop,damping=damping)
print "page_rank"
for damping in [0.8, 0.85, 0.9 ,0.95]:
print damping
key = "page_rank"+str(damping)
wikipedia.vertex_properties[key] = pagerank(wikipedia, damping=damping)
wikipedia.save("output/weightedpagerank/wikipedianetwork_sem_sim.xml.gz")
print 'sem_sim done'
def norm (hypothesis):
hypothesis = hypothesis.copy()
norma = hypothesis.sum(axis=1)
n_nzeros = np.where(norma > 0)
n_zeros,_ = np.where(norma == 0)
norma[n_nzeros] = 1.0 / norma[n_nzeros]
norma = norma.T[0]
csr_scale_rows(hypothesis.shape[0], hypothesis.shape[1], hypothesis.indptr, hypothesis.indices, hypothesis.data, norma)
return hypothesis
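# Hedged usage sketch (not part of the original script): norm() row-normalises a
# csr_matrix so that each non-empty row sums to one (all-zero rows are left
# untouched). The tiny matrix below is invented purely for illustration and the
# helper is never called by the pipeline.
def _norm_example():
    demo = csr_matrix(np.array([[1., 1., 0.],
                                [0., 0., 0.],
                                [2., 0., 2.]]))
    # expected result: rows [0.5, 0.5, 0], [0, 0, 0] and [0.5, 0, 0.5]
    return norm(demo).toarray()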
def weighted_pagerank_hyp_engineering_struct(labels):
#read vocab, graph
graph = read_pickle(SSD_HOME+"pickle/graph")
print "loaded graph"
values = read_pickle(SSD_HOME+"pickle/values")
values_kcore = read_pickle(SSD_HOME+"pickle/values_kcore")
# transform kcore values to model going out of the kcore
values_kcore = [1./np.sqrt(float(x)) for x in values_kcore]
    print 'kcore values transformation'
#sem_sim_hyp = read_pickle(SSD_HOME+"pickle/sem_sim_hyp")
#print "sem_sim_hyp values"
#lead_hyp = read_pickle(SSD_HOME+"pickle/lead_hyp")
#infobox_hyp = read_pickle(SSD_HOME+"pickle/infobox_hyp")
#left_body_hyp = read_pickle(SSD_HOME+"pickle/left-body_hyp")
#print "gamma values"
vocab = read_pickle(SSD_HOME+"pickle/vocab")
print "loaded vocab"
state_count = len(vocab)
states = vocab.keys()
shape = (state_count, state_count)
hyp_structural = csr_matrix((values, (graph[0], graph[1])),
shape=shape, dtype=np.float)
hyp_kcore = csr_matrix((values_kcore, (graph[0], graph[1])),
shape=shape, dtype=np.float)
print "hyp_kcore"
del graph
del values_kcore
print "after delete"
#read sem sim form db and create hyp
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
print 'read'
df = pd.read_sql('select source_article_id, target_article_id, sim from semantic_similarity', conn)
print 'map sem sim'
sem_sim_hyp_i = map_to_hyp_indicies(vocab, df['source_article_id'])
sem_sim_hyp_j = map_to_hyp_indicies(vocab, df['target_article_id'])
hyp_sem_sim = csr_matrix((df['sim'].values, (sem_sim_hyp_i, sem_sim_hyp_j)),
shape=shape, dtype=np.float)
print 'done map sem sim'
print hyp_sem_sim.shape
del sem_sim_hyp_i
del sem_sim_hyp_j
del df
#read vis form csv and create hyp
lead = pd.read_csv(TMP+'lead.tsv',sep='\t')
lead_i = map_to_hyp_indicies(vocab, lead['source_article_id'])
lead_j = map_to_hyp_indicies(vocab, lead['target_article_id'])
lead_v = np.ones(len(lead_i), dtype=np.float)
hyp_lead = csr_matrix((lead_v, (lead_i, lead_j)),
shape=shape, dtype=np.float)
print 'done map lead'
print hyp_lead.shape
del lead
del lead_i
del lead_j
del lead_v
infobox = pd.read_csv(TMP+'infobox.tsv',sep='\t')
infobox_i = map_to_hyp_indicies(vocab, infobox['source_article_id'])
infobox_j = map_to_hyp_indicies(vocab, infobox['target_article_id'])
infobox_v = np.ones(len(infobox_i), dtype=np.float)
hyp_infobox = csr_matrix((infobox_v, (infobox_i, infobox_j)),
shape=shape, dtype=np.float)
print 'done map infobox'
print hyp_infobox.shape
del infobox
del infobox_i
del infobox_j
del infobox_v
left_body = pd.read_csv(TMP+'left-body.tsv',sep='\t')
left_body_i = map_to_hyp_indicies(vocab, left_body['source_article_id'])
left_body_j = map_to_hyp_indicies(vocab, left_body['target_article_id'])
left_body_v = np.ones(len(left_body_i), dtype=np.float)
hyp_left_body = csr_matrix((left_body_v, (left_body_i, left_body_j)),
shape=shape, dtype=np.float)
    print 'done map left-body'
print hyp_left_body.shape
del left_body
del left_body_i
del left_body_j
del left_body_v
#add the visual hyps to one matrix and set all non zero fields to 1.0
print 'before gamma'
hyp_gamma = hyp_left_body + hyp_infobox + hyp_lead
hyp_gamma.data = np.ones_like(hyp_gamma.data, dtype=np.float)
print 'after gamma'
del hyp_left_body
del hyp_infobox
del hyp_lead
#norm
print "in norm each "
hyp_structural = norm(hyp_structural)
hyp_kcore = norm(hyp_kcore)
hyp_sem_sim = norm(hyp_sem_sim)
hyp_gamma = norm(hyp_gamma)
#engineering of hypos and norm again
hyp_mix_semsim_kcore = norm(hyp_structural+hyp_kcore + hyp_sem_sim)
hyp_mix_semsim_visual = norm(hyp_structural+hyp_sem_sim + hyp_gamma)
hyp_mix_kcore_visual= norm(hyp_structural+hyp_kcore + hyp_gamma)
print 'test hypos'
hypos={}
hypos['hyp_mix_semsim_kcore']=hyp_mix_semsim_kcore
hypos['hyp_mix_semsim_visual']=hyp_mix_semsim_visual
hypos['hyp_mix_kcore_visual']=hyp_mix_kcore_visual
#load network
print "weighted page rank engineering"
wikipedia = load_graph("output/wikipedianetwork.xml.gz")
#for label, hyp in hypos.iteritems():
name = '_'.join(labels)
for label in labels:
print label
eprop = create_eprop(wikipedia, hypos[label], vocab)
wikipedia.edge_properties[label]=eprop
#for damping in [0.8, 0.85, 0.9 ,0.95]:
for damping in [0.8,0.85,0.9]:
key = label+"_page_rank_weighted_"+str(damping)
print key
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop, damping=damping)
print 'save network'
wikipedia.save("output/weightedpagerank/wikipedianetwork_hyp_engineering_strcut_"+name+".xml.gz")
print 'save network'
wikipedia.save("output/weightedpagerank/wikipedianetwork_hyp_engineering_strcut_"+name+".xml.gz")
print 'done'
def weighted_pagerank_hyp_engineering(labels):
#read vocab, graph
graph = read_pickle(SSD_HOME+"pickle/graph")
print "loaded graph"
values = read_pickle(SSD_HOME+"pickle/values")
values_kcore = read_pickle(SSD_HOME+"pickle/values_kcore")
# transform kcore values to model going out of the kcore
values_kcore = [1./np.sqrt(float(x)) for x in values_kcore]
    print 'kcore values transformation'
#sem_sim_hyp = read_pickle(SSD_HOME+"pickle/sem_sim_hyp")
#print "sem_sim_hyp values"
#lead_hyp = read_pickle(SSD_HOME+"pickle/lead_hyp")
#infobox_hyp = read_pickle(SSD_HOME+"pickle/infobox_hyp")
#left_body_hyp = read_pickle(SSD_HOME+"pickle/left-body_hyp")
#print "gamma values"
vocab = read_pickle(SSD_HOME+"pickle/vocab")
print "loaded vocab"
state_count = len(vocab)
states = vocab.keys()
shape = (state_count, state_count)
hyp_structural = csr_matrix((values, (graph[0], graph[1])),
shape=shape, dtype=np.float)
hyp_kcore = csr_matrix((values_kcore, (graph[0], graph[1])),
shape=shape, dtype=np.float)
print "hyp_kcore"
del graph
del values_kcore
print "after delete"
#read sem sim form db and create hyp
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
print 'read'
df = pd.read_sql('select source_article_id, target_article_id, sim from semantic_similarity', conn)
print 'map sem sim'
sem_sim_hyp_i = map_to_hyp_indicies(vocab, df['source_article_id'])
sem_sim_hyp_j = map_to_hyp_indicies(vocab, df['target_article_id'])
hyp_sem_sim = csr_matrix((df['sim'].values, (sem_sim_hyp_i, sem_sim_hyp_j)),
shape=shape, dtype=np.float)
print 'done map sem sim'
print hyp_sem_sim.shape
del sem_sim_hyp_i
del sem_sim_hyp_j
del df
#read vis form csv and create hyp
lead = pd.read_csv(TMP+'lead.tsv',sep='\t')
lead_i = map_to_hyp_indicies(vocab, lead['source_article_id'])
lead_j = map_to_hyp_indicies(vocab, lead['target_article_id'])
lead_v = np.ones(len(lead_i), dtype=np.float)
hyp_lead = csr_matrix((lead_v, (lead_i, lead_j)),
shape=shape, dtype=np.float)
print 'done map lead'
print hyp_lead.shape
del lead
del lead_i
del lead_j
del lead_v
infobox = pd.read_csv(TMP+'infobox.tsv',sep='\t')
infobox_i = map_to_hyp_indicies(vocab, infobox['source_article_id'])
infobox_j = map_to_hyp_indicies(vocab, infobox['target_article_id'])
infobox_v = np.ones(len(infobox_i), dtype=np.float)
hyp_infobox = csr_matrix((infobox_v, (infobox_i, infobox_j)),
shape=shape, dtype=np.float)
print 'done map infobox'
print hyp_infobox.shape
del infobox
del infobox_i
del infobox_j
del infobox_v
left_body = pd.read_csv(TMP+'left-body.tsv',sep='\t')
left_body_i = map_to_hyp_indicies(vocab, left_body['source_article_id'])
left_body_j = map_to_hyp_indicies(vocab, left_body['target_article_id'])
left_body_v = np.ones(len(left_body_i), dtype=np.float)
hyp_left_body = csr_matrix((left_body_v, (left_body_i, left_body_j)),
shape=shape, dtype=np.float)
    print 'done map left-body'
print hyp_left_body.shape
del left_body
del left_body_i
del left_body_j
del left_body_v
#add the visual hyps to one matrix and set all non zero fields to 1.0
print 'before gamma'
hyp_gamma = hyp_left_body + hyp_infobox + hyp_lead
hyp_gamma.data = np.ones_like(hyp_gamma.data, dtype=np.float)
print 'after gamma'
del hyp_left_body
del hyp_infobox
del hyp_lead
#norm
print "in norm each "
hyp_structural = norm(hyp_structural)
hyp_kcore = norm(hyp_kcore)
hyp_sem_sim = norm(hyp_sem_sim)
hyp_gamma = norm(hyp_gamma)
#engineering of hypos and norm again
hyp_kcore_struct = norm(hyp_structural + hyp_kcore)
hyp_visual_struct = norm(hyp_structural + hyp_gamma)
hyp_sem_sim_struct = norm(hyp_structural + hyp_sem_sim)
hyp_mix_semsim_kcore = norm(hyp_kcore + hyp_sem_sim)
hyp_mix_semsim_visual = norm(hyp_sem_sim + hyp_gamma)
hyp_mix_kcore_visual= norm(hyp_kcore + hyp_gamma)
hyp_all = norm(hyp_kcore + hyp_sem_sim + hyp_gamma)
hyp_all_struct = norm(hyp_kcore + hyp_sem_sim + hyp_gamma + hyp_structural)
hyp_semsim_struct = norm(hyp_structural + hyp_kcore)
print 'test hypos'
hypos={}
hypos['hyp_kcore']=hyp_kcore
hypos['hyp_sem_sim']=hyp_sem_sim
hypos['hyp_visual']=hyp_gamma
hypos['hyp_kcore_struct']=hyp_kcore_struct
hypos['hyp_visual_struct']=hyp_visual_struct
hypos['hyp_sem_sim_struct']=hyp_sem_sim_struct
hypos['hyp_mix_semsim_kcore']=hyp_mix_semsim_kcore
hypos['hyp_mix_semsim_visual']=hyp_mix_semsim_visual
hypos['hyp_mix_kcore_visual']=hyp_mix_kcore_visual
hypos['hyp_all']=hyp_all
hypos['hyp_all_struct']=hyp_all_struct
#load network
print "weighted page rank engineering"
wikipedia = load_graph("output/wikipedianetwork.xml.gz")
#for label, hyp in hypos.iteritems():
name = '_'.join(labels)
for label in labels:
print label
eprop = create_eprop(wikipedia, hypos[label], vocab)
wikipedia.edge_properties[label]=eprop
#for damping in [0.8, 0.85, 0.9 ,0.95]:
for damping in [0.85]:
key = label+"_page_rank_weighted_"+str(damping)
print key
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop, damping=damping)
print 'save network'
wikipedia.save("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
print 'save network'
wikipedia.save("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
print 'done'
def create_eprop(network, hyp, vocab):
eprop = network.new_edge_property("double")
i = 0
for edge in network.edges():
i+=1
if i % 100000000==0:
print i
src = vocab[str(edge.source())]
trg = vocab[str(edge.target())]
eprop[edge] = hyp[src,trg]
return eprop
def correlations(network_name):
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
cursor = conn.cursor()
# wikipedia graph structural statistics
results = None
try:
results = cursor.execute('select c.curr_id, sum(c.counts) as counts from clickstream_derived c where c.link_type_derived= %s group by c.curr_id;', ("internal-link",))
results = cursor.fetchall()
except MySQLdb.Error, e:
print ('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
print 'after sql load'
print 'before load'
wikipedia = load_graph("output/weightedpagerank/wikipedianetwork_"+network_name+".xml.gz")
print 'after load'
cor = {}
#for kk in ['page_rank', 'page_rank_weighted']:
for kk in ['page_rank_weighted']:
correlations_sem_sim_weighted_pagerank ={}
#for damping in [0.8, 0.85, 0.9 ,0.95]:
for damping in [0.85]:
correlations={}
print damping
key = kk+str(damping)
print key
pagerank = wikipedia.vertex_properties[key]
counts=[]
page_rank_values=[]
for row in results:
counts.append(float(row[1]))
page_rank_values.append(pagerank[wikipedia.vertex(int(row[0]))])
#for index, row in df.iterrows():
# counts.append(float(row['counts']))
# page_rank_values.append(pagerank[wikipedia.vertex(int(row['target_article_id']))])
print 'pearson'
p = pearsonr(page_rank_values, counts)
print p
correlations['pearson']=p
print 'spearmanr'
s= spearmanr(page_rank_values, counts)
print s
correlations['spearmanr']=s
print 'kendalltau'
k= kendalltau(page_rank_values, counts)
print k
correlations['kendalltau']=k
correlations_sem_sim_weighted_pagerank[key]=correlations
cor[kk]=correlations_sem_sim_weighted_pagerank
write_pickle(HOME+'output/correlations/correlations_pagerank_without_zeros'+network_name+'.obj', cor)
def map_to_hyp_indicies(vocab, l):
ids = list()
for v in l.values:
ids.append(vocab[str(v)])
return ids
def pickle_correlations_zeros():
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
print 'read'
df = pd.read_sql('select source_article_id, target_article_id, IFNULL(counts, 0) as counts from link_features group by source_article_id, target_article_id', conn)
print 'group'
article_counts = df.groupby(by=["target_article_id"])['counts'].sum().reset_index()
print 'write to file'
article_counts[["target_article_id","counts"]].to_csv(TMP+'article_counts.tsv', sep='\t', index=False)
def pickle_correlations_zeros_january():
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
print 'read'
df = pd.read_sql('select source_article_id, target_article_id from link_features', conn)
print 'loaded links'
df2 = pd.read_sql('select prev_id, curr_id, counts from clickstream_derived_en_201501 where link_type_derived= "internal-link";', conn)
print 'loaded counts'
result = pd.merge(df, df2, how='left', left_on = ['source_article_id', 'target_article_id'], right_on = ['prev_id', 'curr_id'])
print 'merged counts'
print result
article_counts = result.groupby(by=["target_article_id"])['counts'].sum().reset_index()
article_counts['counts'].fillna(0.0, inplace=True)
print article_counts
print 'write to file'
article_counts[["target_article_id","counts"]].to_csv(TMP+'january_article_counts.tsv', sep='\t', index=False)
def correlations_ground_truth():
print 'ground truth'
#load network
wikipedia = load_graph("output/weightedpagerank/wikipedianetwork_hyp_engineering.xml.gz")
#read counts with zeros
article_counts = pd.read_csv(TMP+'article_counts.tsv', sep='\t')
cor = {}
for damping in [0.8,0.9]:
page_rank = pagerank(wikipedia, damping=damping)
wikipedia.vertex_properties['page_rank_'+str(damping)] = page_rank
page_rank_values = list()
counts = list()
correlations_values = {}
for index, row in article_counts.iterrows():
counts.append(float(row['counts']))
page_rank_values.append(page_rank[wikipedia.vertex(int(row['target_article_id']))])
print 'pearson'
p = pearsonr(page_rank_values, counts)
print p
correlations_values['pearson']=p
print 'spearmanr'
s = spearmanr(page_rank_values, counts)
print s
correlations_values['spearmanr']=s
print 'kendalltau'
k = kendalltau(page_rank_values, counts)
print k
correlations_values['kendalltau']=k
cor['page_rank_'+str(damping)]=correlations_values
write_pickle(HOME+'output/correlations/correlations_pagerank.obj', cor)
def correlations_zeros(labels, consider_zeros=True, clickstream_data='', struct=False):
#load network
print struct
name = '_'.join(labels)
wikipedia = load_graph("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
#read counts with zeros
if consider_zeros:
article_counts = pd.read_csv(TMP+clickstream_data+'article_counts.tsv', sep='\t')
print TMP+clickstream_data+'article_counts.tsv'
correlations_weighted_pagerank = {}
for label in labels:
if struct:
label = label[7:]
for damping in [0.8,0.85,0.9]:
key = label+"_page_rank_weighted_"+str(damping)
pagerank = wikipedia.vertex_properties[key]
page_rank_values = list()
counts = list()
correlations_values = {}
for index, row in article_counts.iterrows():
counts.append(float(row['counts']))
page_rank_values.append(pagerank[wikipedia.vertex(int(row['target_article_id']))])
print 'pearson'
p = pearsonr(page_rank_values, counts)
print p
correlations_values['pearson']=p
print 'spearmanr'
s = spearmanr(page_rank_values, counts)
print s
correlations_values['spearmanr']=s
print 'kendalltau'
k = kendalltau(page_rank_values, counts)
print k
correlations_values['kendalltau']=k
correlations_weighted_pagerank[key]=correlations_values
write_pickle(HOME+'output/correlations/'+clickstream_data+'correlations_pagerank_'+name+'.obj', correlations_weighted_pagerank)
else:
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
cursor = conn.cursor()
# wikipedia graph structural statistics
results = None
try:
if clickstream_data != '':
results = cursor.execute('select c.curr_id, sum(c.counts) as counts from clickstream_derived c where c.link_type_derived= %s group by c.curr_id;', ("internal-link",))
results = cursor.fetchall()
else:
results = cursor.execute('select c.curr_id, sum(c.counts) as counts from clickstream_derived_en_201501 c where c.link_type_derived= %s group by c.curr_id;', ("internal-link",))
results = cursor.fetchall()
except MySQLdb.Error, e:
print ('error retrieving xy coord for all links links %s (%d)' % (e.args[1], e.args[0]))
print 'after sql load'
correlations_weighted_pagerank = {}
for label in labels:
if struct:
label = label[7:]
for damping in [0.8,0.85,0.9]:
key = label+"_page_rank_weighted_"+str(damping)
pagerank = wikipedia.vertex_properties[key]
correlations={}
counts=[]
page_rank_values=[]
for row in results:
counts.append(float(row[1]))
page_rank_values.append(pagerank[wikipedia.vertex(int(row[0]))])
print 'pearson'
p = pearsonr(page_rank_values, counts)
print p
correlations['pearson']=p
print 'spearmanr'
s= spearmanr(page_rank_values, counts)
print s
correlations['spearmanr']=s
print 'kendalltau'
k= kendalltau(page_rank_values, counts)
print k
correlations['kendalltau']=k
correlations_weighted_pagerank[key]=correlations
write_pickle(HOME+'output/correlations/'+clickstream_data+'correlations_pagerank_without_zeros'+name+'.obj', correlations_weighted_pagerank)
def correlations_weighted_unweighted(labels):
#load network
print 'weighted vs unweighted'
name = '_'.join(labels)
wikipedia = load_graph("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
#read counts with zeros
wikipedia_u = load_graph("output/weightedpagerank/wikipedianetwork_sem_sim_distinct_links.xml.gz")
correlations_weighted_pagerank = {}
for label in labels:
for damping in [0.8,0.85,0.9]:
correlations_values={}
key_weighted = label+"_page_rank_weighted_"+str(damping)
pagerank_weighted = wikipedia.vertex_properties[key_weighted]
key_unweighted = "page_rank"+str(damping)
pagerank_unweighted = wikipedia_u.vertex_properties[key_unweighted]
print 'pearson'
p = pearsonr(pagerank_weighted.a, pagerank_unweighted.a)
print p
correlations_values['pearson']=p
print 'spearmanr'
s = spearmanr(pagerank_weighted.a, pagerank_unweighted.a)
print s
correlations_values['spearmanr']=s
print 'kendalltau'
k = kendalltau(pagerank_weighted.a, pagerank_unweighted.a)
print k
correlations_values['kendalltau']=k
correlations_weighted_pagerank[label+str(damping)]=correlations_values
write_pickle(HOME+'output/correlations/correlations_pagerank_weightedvsunweighted'+name+'.obj', correlations_weighted_pagerank)
def damping_factors(networks_list):
for labels in networks_list:
name = '_'.join(labels)
print name
wikipedia = load_graph("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
for label in labels:
eprop = wikipedia.edge_properties[label]
for damping in [0.8, 0.9]:
key = label+"_page_rank_weighted_"+str(damping)
print key
wikipedia.vertex_properties[key] = pagerank(wikipedia, weight=eprop, damping=damping)
wikipedia.save("output/weightedpagerank/wikipedianetwork_hyp_engineering_"+name+".xml.gz")
def wpr():
#load network
print "wpr"
wikipedia = load_graph("output/wikipedianetwork.xml.gz")
eprop = wikipedia.new_edge_property("double")
i = 0
for edge in wikipedia.edges():
i+=1
if i % 100000000==0:
print i
v = edge.source()
u = edge.target()
sum_v_out_neighbors_indegree = sum([node.in_degree() for node in v.out_neighbours()])
win = float(u.in_degree())/float(sum_v_out_neighbors_indegree)
sum_v_out_neighbors_out_degree = sum ([node.out_degree() for node in v.out_neighbours()])
wout = float(u.out_degree())/float(sum_v_out_neighbors_out_degree)
eprop[edge] = win*wout
print "done edge prop"
wikipedia.edge_properties['wpr']=eprop
for damping in [0.8, 0.85, 0.9]:
wikipedia.vertex_properties['wpr'+str(damping)] = pagerank(wikipedia, weight=eprop, damping=damping)
print 'save network'
wikipedia.save("output/weightedpagerank/wikipedianetworkwpralg.xml.gz")
print 'done'
if __name__ == '__main__':
#Parallel(n_jobs=3, backend="multiprocessing")(delayed(weighted_pagerank_hyp_engineering)(labels) for labels in
# [['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
# ['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
# ['hyp_all','hyp_all_struct','hyp_mix_kcore_visual']])
#Parallel(n_jobs=3, backend="multiprocessing")(delayed(weighted_pagerank_hyp_engineering_struct)(labels) for labels in
# [['hyp_mix_semsim_kcore'],
# ['hyp_mix_semsim_visual'],
# ['hyp_mix_kcore_visual']])
#Parallel(n_jobs=1, backend="multiprocessing")(delayed(weighted_pagerank_hyp_engineering)(labels) for labels in
# [['hyp_sem_sim_struct']])
#Parallel(n_jobs=4, backend="multiprocessing")(delayed(correlations_zeros)(labels, True) for labels in
# [['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
# ['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
# ['hyp_all','hyp_all_struct','hyp_mix_kcore_visual'],['hyp_sem_sim_struct']])
Parallel(n_jobs=4, backend="multiprocessing")(delayed(correlations_zeros)(labels, True, 'january_', False) for labels in
[['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
['hyp_all','hyp_all_struct','hyp_mix_kcore_visual'],['hyp_sem_sim_struct']])
#Parallel(n_jobs=4, backend="multiprocessing")(delayed(correlations_zeros)(labels, False) for labels in
# [['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
# ['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
# ['hyp_all','hyp_all_struct','hyp_mix_kcore_visual'],['hyp_sem_sim_struct']])
#Parallel(n_jobs=4, backend="multiprocessing")(delayed(correlations_weighted_unweighted)(labels) for labels in
# [['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
# ['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
# ['hyp_all','hyp_all_struct','hyp_mix_kcore_visual'],['hyp_sem_sim_struct']])
#Parallel(n_jobs=3, backend="multiprocessing")(delayed(correlations_zeros)(labels, True, True) for labels in
# [['strcut_hyp_mix_semsim_kcore'],
# ['strcut_hyp_mix_semsim_visual'],
# ['strcut_hyp_mix_kcore_visual']])
#wpr()
#correlations_ground_truth()
#damping_factors([['hyp_kcore','hyp_sem_sim','hyp_visual','hyp_kcore_struct'],
# ['hyp_visual_struct','hyp_mix_semsim_kcore','hyp_mix_semsim_visual'],
# ['hyp_all','hyp_all_struct','hyp_mix_kcore_visual'],['hyp_sem_sim_struct']])
#weigted_pagerank()
#correlations('sem_sim_distinct_links')
#correlations('link_occ')
#correlations('sem_sim')
#correlations('hyp_engineering')
#pickle_correlations_zeros()
#pickle_correlations_zeros_january()
| mit |
sasdelli/lc_predictor | lc_predictor/savgol.py | 1 | 3104 | import numpy as np
# This is Thomas Haslwanter's implementation at:
# http://wiki.scipy.org/Cookbook/SavitzkyGolay
def savitzky_golay(y, window_size, order, deriv=0):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
    The Savitzky-Golay filter is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv]
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m, y, mode='valid')
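# Hedged usage sketch (not part of the original module): with deriv=1 the same
# routine gives a smoothed estimate of the first derivative in per-sample units;
# divide by the sample spacing to recover dy/dx (and verify the sign against a
# known signal, since the convolution orientation is not corrected here). The
# signal below is invented purely for illustration.
def _savgol_derivative_example():
    t = np.linspace(-4, 4, 500)
    y = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
    dt = t[1] - t[0]
    return savitzky_golay(y, window_size=31, order=4, deriv=1) / dt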
| gpl-3.0 |
RapidApplicationDevelopment/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 13 | 4470 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
nomadcube/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
xparedesfortuny/Phot | calibration/nightly_std_test.py | 1 | 4508 | # Author: Xavier Paredes-Fortuny ([email protected])
# License: MIT, see LICENSE.md
import os
import shutil
import subprocess
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import sys
import pyfits
param = {}
execfile(sys.argv[1])
def make_image_list(i):
t = [d for d in os.listdir(i) if d[-5:] == '.fits']
return t
def make_cat_folder(i):
if os.path.exists(i+'cat'):
shutil.rmtree(i+'cat')
os.makedirs(i+'cat')
def remove_cat_folder(i):
if os.path.exists(i+'cat'):
shutil.rmtree(i+'cat')
def call_sextractor(i, im, sl):
header = pyfits.getheader(i+im)
if int(header['MJD']) < 57416:
GAIN = 12.5
PIXEL_SCALE = 3.9
else:
GAIN = 0.34
PIXEL_SCALE = 2.37
cat_name = i+'cat/'+im[:-6]+'.cat'
cmd = 'sex {} -c se.sex -CATALOG_NAME {} -SATUR_LEVEL {} -GAIN {} -PIXEL_SCALE {}'.format(i+im, cat_name, sl, GAIN, PIXEL_SCALE)
subprocess.call(cmd, shell=True)
return cat_name
def create_catalog_arrays(i, il, sl):
cat_list_ra = []
cat_list_dec = []
cat_list_mag = []
for im in il:
cat_name = call_sextractor(i, im, sl)
mag, x, y, flag = np.loadtxt(cat_name, usecols=(0, 2, 3, 4), unpack=True)
# SExtractor is unable to read the tan-sip wcs produced by Astrometry.net
from astropy import wcs
w = wcs.WCS(i+'/'+im)
ra, dec = w.all_pix2world(x, y, 1)
cat_list_mag.append(mag[flag == 0])
cat_list_ra.append(ra[flag == 0])
cat_list_dec.append(dec[flag == 0])
return cat_list_ra, cat_list_dec, cat_list_mag
def match_stars(cat_list_ra, cat_list_dec, cat_list_mag):
tol = param['astrometric_tolerance']
# Convert from arcsec to deg
tol /= 3600.
cat1 = SkyCoord(cat_list_ra[0]*u.degree, cat_list_dec[0]*u.degree)
match_flag = np.ones(len(cat_list_ra[0]), dtype='bool')
for i in xrange(1, len(cat_list_ra)):
cat2 = SkyCoord(cat_list_ra[i]*u.degree, cat_list_dec[i]*u.degree)
index, sep2d, dist3d = cat1.match_to_catalog_sky(cat2)
cat_list_ra[i] = cat_list_ra[i][index]
cat_list_dec[i] = cat_list_dec[i][index]
cat_list_mag[i] = cat_list_mag[i][index]
wrong_match = [j for (j, d) in enumerate(sep2d.value) if d > tol]
match_flag[wrong_match] = False
for i in xrange(len(cat_list_ra)):
cat_list_ra[i] = cat_list_ra[i][match_flag]
cat_list_dec[i] = cat_list_dec[i][match_flag]
cat_list_mag[i] = cat_list_mag[i][match_flag]
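# Hedged usage sketch (not part of the original script): match_to_catalog_sky
# pairs every star in the first catalogue with its nearest neighbour in the
# second; separations larger than the tolerance are treated as wrong matches
# above. The coordinates below are invented purely for illustration and the
# helper is never called by the pipeline.
def _match_demo(tol_arcsec=2.0):
    cat1 = SkyCoord([10.0, 10.1]*u.degree, [20.0, 20.1]*u.degree)
    cat2 = SkyCoord([10.1, 10.0]*u.degree, [20.1, 20.0]*u.degree)  # same stars, shuffled
    index, sep2d, _ = cat1.match_to_catalog_sky(cat2)
    # index -> [1, 0]; every separation is ~0, so all matches pass the tolerance
    return index, sep2d.arcsec < tol_arcsec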
def compute_statistics(cat_list_mag):
avg_mag = [np.average(cat_list_mag[:, k]) for k in xrange(len(cat_list_mag[0, :]))]
std_mag = [np.std(cat_list_mag[:, k]) for k in xrange(len(cat_list_mag[0, :]))]
return np.array(avg_mag), np.array(std_mag)
def plot(x, y, o):
plt.rcdefaults()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, '.')
ax.set_yscale('log')
ax.set_xlabel(r'$\overline{m}$ (mag)')
ax.set_ylabel(r'$\sigma_{m}$ (mag)')
ax.set_xlim((min(x)*(1-0.05), max(x)*(1+0.05)))
ax.set_ylim((min(y)*(1-0.05), max(y)*(1+0.05)))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
plt.table(cellText=[['N', r'$\overline{{\sigma}}$'],
[1, '{:.3f}'.format(y[0])],
[5, '{:.3f}'.format(np.average(y[0:5]))],
[10, '{:.3f}'.format(np.average(y[0:10]))],
[25, '{:.3f}'.format(np.average(y[0:25]))],
[50, '{:.3f}'.format(np.average(y[0:50]))],
[100, '{:.3f}'.format(np.average(y[0:100]))]],
colWidths=[0.1, 0.1],
loc='center left')
fig.savefig(o, bbox_inches='tight', pad_inches=0.05)
plt.close(fig)
def perform_test(i, o):
sl = param['saturation_level']
il = make_image_list(i)
make_cat_folder(i)
cat_list_ra, cat_list_dec, cat_list_mag = create_catalog_arrays(i, il, sl)
match_stars(cat_list_ra, cat_list_dec, cat_list_mag)
cat_list_mag = np.array(cat_list_mag)
avg_mag, std_mag = compute_statistics(cat_list_mag)
plot(avg_mag[std_mag.argsort()], sorted(std_mag), o)
remove_cat_folder(i)
if __name__ == '__main__':
# Testing
fn = param['field_name']
f = param['test_path']
perform_test(f+'phot/'+fn+'/tmp/science/', f+'phot/'+fn+'/std_00_test.eps')
print 'DONE'
| mit |
sgrieve/LH_Paper_Plotting | Plotting_Code/Figure_4_Coweeta_revision.py | 1 | 5560 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015 Stuart W.D Grieve 2015
Developer can be contacted by s.grieve _at_ ed.ac.uk
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to:
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA
Script to generate Figure 4 from Grieve et al. (2015)
Input data is generated using LH_Driver.cpp
Parameters to be modified are highlighted by comments
@author: SWDG
"""
def mm_to_inch(mm):
return mm*0.0393700787
import matplotlib.pyplot as plt
from matplotlib import rcParams
import MuddPyStatsTools as mpy
import numpy as np
import string
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = 10
rcParams['xtick.direction'] = 'out'
rcParams['ytick.direction'] = 'out'
#================ modifyable parameters start here ====================
#paths to the data and to save the figure to
path = 'C:/Users/Stuart/Dropbox/LH_Paper/nc/' #path to the folder contaning the hilltopdata files
filename = 'NC_PaperData.txt'#names of the hilltopdata file
figpath = 'C:/Users/Stuart/Desktop/FR/final_figures_revision/' #path to save the final figure
#plot style parameters
xmaxes = [450,200,400]
ymaxes = [9,13,11]
xsteps = [200,100,100]
ysteps = [2,3,2]
v_line_lims = [1.2,0,0]
#plot labels
Methods = ['Hilltop Flow Routing','Slope-Area','Drainage Density']
fig_labels = list(string.ascii_lowercase)[:3] #generate subplot labels
#number of bins in the histograms
nbins = 20
#================ modifyable parameters end here ====================
fig = plt.figure()
#load the paperdata file to get the LH data
with open(path+filename,'r') as f:
f.readline()
data = f.readlines()
lh_ = []
SA = []
SA_Plot =[]
DD = []
for d in data:
split = d.split()
lh = float(split[2])
sa_lh = float(split[9])
dd = float(split[11])
if (sa_lh > 2.0):
SA.append(sa_lh)
if (sa_lh < 350.):
SA_Plot.append(sa_lh)
if (lh > 2.0):
lh_.append(lh)
if (dd > 2.0):
DD.append(dd)
Calc_Data = [lh_,SA,DD]
Plot_Data = [lh_,SA_Plot,DD]
for subplot_count, (Method,xmax,ymax,xstep,ystep,labels,v_line_lim) in enumerate(zip(Methods,xmaxes,ymaxes,xsteps,ysteps,fig_labels,v_line_lims)):
LH = Calc_Data[subplot_count]
    #get the median absolute deviation (MAD)
MAD = mpy.calculate_MedianAbsoluteDeviation(LH)
#set up the 4 subplots
ax = plt.subplot(3,1,subplot_count + 1)
#Add a title with the method name
ax.text(.5,.9,Method, horizontalalignment='center', transform=ax.transAxes,fontsize=12)
#plot the histogram and get the patches so we can colour them
n,bins,patches = plt.hist(Plot_Data[subplot_count],bins=nbins,color='k',linewidth=0)
    #get the median -/+ the median absolute deviation
MinMAD = np.median(LH)-MAD
MaxMAD = np.median(LH)+MAD
#color the bins that fall within +/- MAD of the median
#http://stackoverflow.com/questions/6352740/matplotlib-label-each-bin
for patch, rightside, leftside in zip(patches, bins[1:], bins[:-1]):
if rightside < MinMAD:
patch.set_alpha(0.4)
elif leftside > MaxMAD:
patch.set_alpha(0.4)
#Insert dashed red line at median
plt.vlines(np.median(LH),0,ymax-v_line_lim,label='Median', color='r',linewidth=1,linestyle='dashed')
#set x axis limits
plt.xlim(0,xmax)
plt.ylim(0,ymax)
#format the ticks to only appear on the bottom and left axes
plt.tick_params(axis='x', which='both', top='off',length=2)
plt.tick_params(axis='y', which='both', right='off',length=2)
#configure tick spacing based on the defined spacings given
ax.xaxis.set_ticks(np.arange(0,xmax+1,xstep))
ax.yaxis.set_ticks(np.arange(0,ymax+1,ystep))
#annotate the plot with the median and MAD and the subplot label
plt.annotate('Median = '+str(int(round(np.median(LH),0)))+' m\nMAD = '+str(int(round(MAD,0)))+' m', xy=(0.6, 0.7), xycoords='axes fraction', fontsize=10, horizontalalignment='left', verticalalignment='top')
plt.annotate(labels, xy=(0.95, 0.95), xycoords='axes fraction', fontsize=10, horizontalalignment='left', verticalalignment='top')
#spacing of the plots
plt.subplots_adjust(hspace = 0.25,left=0.2)
#x and y axis labels
fig.text(0.5, 0.05, 'Hillslope length (m)', ha='center', va='center', size=12)
fig.text(0.06, 0.5, 'Count', ha='center', va='center', rotation='vertical', size=12)
# title
fig.text(0.5, 0.925, 'Coweeta', ha='center', va='center', size=14)
#set the size of the plot to be saved. These are the JGR sizes:
#quarter page = 95*115
#half page = 190*115 (horizontal) 95*230 (vertical)
#full page = 190*230
fig.set_size_inches(mm_to_inch(95), mm_to_inch(200))
plt.savefig(figpath+'Figure_4.png', dpi = 500) #change to *.tif for submission
| gpl-2.0 |
hdmetor/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
kazuar/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
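# Minimal synthetic sketch of the reasoning in the MakeFigures docstring
# (added for illustration; it uses generated data, not the census file).
# If the sizes really are lognormal, their logs are normal, so the
# standardized quantiles of log(sizes) should sit close to the normal
# quantiles, even though the upper tail of the CCDF can look straight on a
# log-log plot and be mistaken for a Pareto tail.
def _lognormal_tail_sketch():
    sample = np.random.lognormal(mean=8, sigma=1.5, size=10000)
    log_sample = np.log10(sample)
    zs = (log_sample - log_sample.mean()) / log_sample.std()
    qs = np.percentile(zs, [2.5, 16, 50, 84, 97.5])
    # For normal data these are roughly [-1.96, -1, 0, 1, 1.96].
    return qs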
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
ywcui1990/htmresearch | projects/sequence_classification/util_functions.py | 11 | 11219 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import os
import matplotlib.lines as lines
import numpy as np
def loadDataset(dataName, datasetName, useDeltaEncoder=False):
fileDir = os.path.join('./{}'.format(datasetName),
dataName, dataName+'_TRAIN')
trainData = np.loadtxt(fileDir, delimiter=',')
trainLabel = trainData[:, 0].astype('int')
trainData = trainData[:, 1:]
fileDir = os.path.join('./{}'.format(datasetName),
dataName, dataName + '_TEST')
testData = np.loadtxt(fileDir, delimiter=',')
testLabel = testData[:, 0].astype('int')
testData = testData[:, 1:]
if useDeltaEncoder:
trainData = np.diff(trainData)
testData = np.diff(testData)
classList = np.unique(trainLabel)
classMap = {}
for i in range(len(classList)):
classMap[classList[i]] = i
for i in range(len(trainLabel)):
trainLabel[i] = classMap[trainLabel[i]]
for i in range(len(testLabel)):
testLabel[i] = classMap[testLabel[i]]
return trainData, trainLabel, testData, testLabel
def listDataSets(datasetName):
dataSets = [d for d in os.listdir('./{}'.format(datasetName)) if os.path.isdir(
os.path.join('./{}'.format(datasetName), d))]
return dataSets
def calculateAccuracy(distanceMat, trainLabel, testLabel):
outcome = []
for i in range(len(testLabel)):
predictedClass = trainLabel[np.argmax(distanceMat[i, :])]
correct = 1 if predictedClass == testLabel[i] else 0
outcome.append(correct)
accuracy = np.mean(np.array(outcome))
return accuracy, outcome
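# Clarifying sketch (added; not in the original module): calculateAccuracy()
# treats distanceMat as a *similarity* (overlap) matrix, which is why the
# predicted class comes from argmax rather than argmin -- a larger overlap
# means the training sample is closer. Toy values below are assumed.
def _calculate_accuracy_sketch():
  # Two test samples against three training samples labelled [0, 1, 1].
  overlap = np.array([[5., 1., 0.],
                      [0., 2., 7.]])
  trainLabel = np.array([0, 1, 1])
  testLabel = np.array([0, 1])
  accuracy, outcome = calculateAccuracy(overlap, trainLabel, testLabel)
  return accuracy, outcome  # expected: (1.0, [1, 1])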
def calculateEuclideanModelAccuracy(trainData, trainLabel, testData, testLabel):
outcomeEuclidean = []
for i in range(testData.shape[0]):
predictedClass = one_nearest_neighbor(trainData, trainLabel, testData[i, :])
correct = 1 if predictedClass == testLabel[i] else 0
outcomeEuclidean.append(correct)
return outcomeEuclidean
def one_nearest_neighbor(trainData, trainLabel, unknownSequence):
"""
One nearest neighbor with Euclidean Distance
@param trainData (nSample, NT) training data
@param trainLabel (nSample, ) training data labels
@param unknownSequence (1, NT) sequence to be classified
"""
distance = np.zeros((trainData.shape[0],))
for i in range(trainData.shape[0]):
distance[i] = np.sqrt(np.sum(np.square(trainData[i, :]-unknownSequence)))
predictedClass = trainLabel[np.argmin(distance)]
return predictedClass
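# Minimal usage sketch for the Euclidean 1-NN helper above (assumed toy
# values, added for illustration): the unknown sequence is closest to the
# second training row, so that row's label is returned.
def _one_nearest_neighbor_sketch():
  trainData = np.array([[0., 0., 0.],
                        [1., 1., 1.]])
  trainLabel = np.array([3, 7])
  unknown = np.array([0.9, 1.1, 1.0])
  return one_nearest_neighbor(trainData, trainLabel, unknown)  # -> 7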
def sortDistanceMat(distanceMat, trainLabel, testLabel):
"""
Sort Distance Matrix according to training/testing class labels such that
  nearby entries share the same class labels
:param distanceMat: original (unsorted) distance matrix
:param trainLabel: list of training labels
:param testLabel: list of testing labels
:return:
"""
numTrain = len(trainLabel)
numTest = len(testLabel)
sortIdxTrain = np.argsort(trainLabel)
sortIdxTest = np.argsort(testLabel)
distanceMatSort = np.zeros((numTest, numTrain))
for i in xrange(numTest):
for j in xrange(numTrain):
distanceMatSort[i, j] = distanceMat[sortIdxTest[i], sortIdxTrain[j]]
return distanceMatSort
def smoothArgMax(array):
idx = np.where(array == np.max(array))[0]
return np.median(idx).astype('int')
def calculateClassLines(trainLabel, testLabel, classList):
sortIdxTrain = np.argsort(trainLabel)
sortIdxTest = np.argsort(testLabel)
vLineLocs = []
hLineLocs = []
for c in classList[:-1]:
hLineLocs.append(np.max(np.where(testLabel[sortIdxTest] == c)[0]) + .5)
vLineLocs.append(np.max(np.where(trainLabel[sortIdxTrain] == c)[0]) + .5)
return vLineLocs, hLineLocs
def addClassLines(ax, vLineLocs, hLineLocs):
for vline in vLineLocs:
ax.add_line(lines.Line2D([vline, vline], ax.get_ylim(), color='k'))
for hline in hLineLocs:
ax.add_line(lines.Line2D(ax.get_xlim(), [hline, hline], color='k'))
def calculateEuclideanDistanceMat(testData, trainData):
EuclideanDistanceMat = np.zeros((testData.shape[0], trainData.shape[0]))
for i in range(testData.shape[0]):
for j in range(trainData.shape[0]):
EuclideanDistanceMat[i, j] = np.sqrt(np.sum(
np.square(testData[i, :] - trainData[j, :])))
return EuclideanDistanceMat
def overlapDist(s1, s2):
if len(s1.union(s2)) == 0:
return 0
else:
return float(len(s1.intersection(s2)))/len(s1.union(s2))
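# overlapDist() is the Jaccard index of two sets of active indices (size of
# the intersection over size of the union), with a guard for two empty sets.
# A quick worked example with assumed values:
#   s1 = {1, 2, 3, 4}, s2 = {3, 4, 5}
#   intersection = {3, 4}, union = {1, 2, 3, 4, 5}  ->  2 / 5 = 0.4
def _overlap_dist_sketch():
  return overlapDist(set([1, 2, 3, 4]), set([3, 4, 5]))  # -> 0.4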
def calculateDistanceMat(activeColumnsTest, activeColumnsTrain):
nTest = len(activeColumnsTest)
nTrain = len(activeColumnsTrain)
sequenceLength = len(activeColumnsTrain[0])
activeColumnOverlapTest = np.zeros((nTest, nTrain))
for i in range(nTest):
for j in range(nTrain):
if type(activeColumnsTest[0]) is np.ndarray:
activeColumnOverlapTest[i, j] = np.sum(np.sqrt(np.multiply(activeColumnsTest[i], activeColumnsTrain[j])))
# activeColumnOverlapTest[i, j] = np.sum(np.minimum(activeColumnsTest[i], activeColumnsTrain[j]))
else:
for t in range(sequenceLength):
activeColumnOverlapTest[i, j] += overlapDist(
activeColumnsTest[i][t], activeColumnsTrain[j][t])
return activeColumnOverlapTest
def calculateDistanceMatTrain(activeColumnsTrain):
nTrain = len(activeColumnsTrain)
sequenceLength = len(activeColumnsTrain[0])
activeColumnOverlap = np.zeros((nTrain, nTrain))
for i in range(nTrain):
for j in range(i+1, nTrain):
for t in range(sequenceLength):
activeColumnOverlap[i, j] += len(
activeColumnsTrain[i][t].intersection(activeColumnsTrain[j][t]))
activeColumnOverlap[j, i] = activeColumnOverlap[i, j]
return activeColumnOverlap
def constructDistanceMat(distMatColumn, distMatCell, trainLabel, wOpt, bOpt):
numTest, numTrain = distMatColumn.shape
classList = np.unique(trainLabel).tolist()
distanceMat = np.zeros((numTest, numTrain))
for classI in classList:
classIidx = np.where(trainLabel == classI)[0]
distanceMat[:, classIidx] = \
(1 - wOpt[classI]) * distMatColumn[:, classIidx] + \
wOpt[classI] * distMatCell[:, classIidx] + bOpt[classI]
return distanceMat
def costFuncSharedW(newW, w, b, distMatColumn, distMatCell,
trainLabel, classList):
wTest = copy.deepcopy(w)
for classI in classList:
wTest[classI] = newW
distanceMatXV = constructDistanceMat(
distMatColumn, distMatCell, trainLabel, wTest, b)
accuracy, outcome = calculateAccuracy(distanceMatXV, trainLabel, trainLabel)
return -accuracy
def costFuncW(newW, classI, w, b, activeColumnOverlap, activeCellOverlap, trainLabel, classList):
wTest = copy.deepcopy(w)
wTest[classList[classI]] = newW
numXVRpts = 10
accuracyRpt = np.zeros((numXVRpts,))
for rpt in range(numXVRpts):
(activeColumnOverlapXV, activeCellOverlapXV,
trainLabelXV, trainLabeltrain) = generateNestedXCdata(
trainLabel, activeColumnOverlap, activeCellOverlap, seed=rpt)
distanceMat = constructDistanceMat(
activeColumnOverlapXV, activeCellOverlapXV, trainLabeltrain, wTest, b)
accuracy, outcome = calculateAccuracy(
distanceMat, trainLabeltrain, trainLabelXV)
accuracyRpt[rpt] = accuracy
return -np.mean(accuracyRpt)
def costFuncB(newB, classI, w, b, activeColumnOverlap, activeCellOverlap, trainLabel, classList):
bTest = copy.deepcopy(b)
bTest[classList[classI]] = newB
numXVRpts = 10
accuracyRpt = np.zeros((numXVRpts,))
for rpt in range(numXVRpts):
(activeColumnOverlapXV, activeCellOverlapXV,
trainLabelXV, trainLabeltrain) = generateNestedXCdata(
trainLabel, activeColumnOverlap, activeCellOverlap, seed=rpt)
distanceMat = constructDistanceMat(
activeColumnOverlapXV, activeCellOverlapXV, trainLabeltrain, w, bTest)
accuracy, outcome = calculateAccuracy(
distanceMat, trainLabeltrain, trainLabelXV)
accuracyRpt[rpt] = accuracy
return -np.mean(accuracyRpt)
def prepareClassifierInput(distMatColumn, distMatCell, classList, classLabel, options):
classIdxMap = {}
for classIdx in classList:
classIdxMap[classIdx] = np.where(classLabel == classIdx)[0]
classifierInput = []
numSample, numTrain = distMatColumn.shape
classList = classIdxMap.keys()
numClass = len(classList)
for i in range(numSample):
if options['useColumnRepresentation']:
columnNN = np.zeros((numClass,))
else:
columnNN = np.array([])
if options['useCellRepresentation']:
cellNN = np.zeros((numClass,))
else:
cellNN = np.array([])
for classIdx in classList:
if options['useColumnRepresentation']:
columnNN[classIdx] = np.max(
distMatColumn[i, classIdxMap[classIdx]])
if options['useCellRepresentation']:
cellNN[classIdx] = np.max(distMatCell[i, classIdxMap[classIdx]])
# if options['useColumnRepresentation']:
# columnNN[columnNN < np.max(columnNN)] = 0
# columnNN[columnNN == np.max(columnNN)] = 1
#
# if options['useCellRepresentation']:
# cellNN[cellNN < np.max(cellNN)] = 0
# cellNN[cellNN == np.max(cellNN)] = 1
classifierInput.append(np.concatenate((columnNN, cellNN)))
return classifierInput
def generateNestedXCdata(trainLabel, distMatColumn, distMatCell,
seed=1, xcPrct=0.5):
"""
Set aside a portion of the training data for nested cross-validation
:param trainLabel:
:param distMatColumn:
:param distMatCell:
:param xcPrct:
:return:
"""
np.random.seed(seed)
randomIdx = np.random.permutation(len(trainLabel))
numXVsamples = int(len(trainLabel) * xcPrct)
numTrainSample = len(trainLabel) - numXVsamples
selectXVSamples = randomIdx[:numXVsamples]
selectTrainSamples = randomIdx[numXVsamples:]
selectXVSamples = np.sort(selectXVSamples)
selectTrainSamples = np.sort(selectTrainSamples)
distMatColumnXV = np.zeros((numXVsamples, numTrainSample))
distMatCellXV = np.zeros((numXVsamples, numTrainSample))
for i in range(numXVsamples):
distMatColumnXV[i, :] = distMatColumn[
selectXVSamples[i], selectTrainSamples]
distMatCellXV[i, :] = distMatCell[
selectXVSamples[i], selectTrainSamples]
trainLabelXV = trainLabel[selectXVSamples]
trainLabeltrain = trainLabel[selectTrainSamples]
return (distMatColumnXV, distMatCellXV,
trainLabelXV, trainLabeltrain) | agpl-3.0 |
andrewnc/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
        corresponding data points and add the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
syl20bnr/nupic | examples/opf/tools/sp_plotter.py | 8 | 15763 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import copy
import csv
import numpy as np
from nupic.research import FDRCSpatial2
from nupic.bindings.math import GetNTAReal
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
realDType = GetNTAReal()
############################################################################
def generatePlot(outputs, origData):
""" Generates a table where each cell represent a frequency of pairs
as described below.
x coordinate is the % difference between input records (origData list),
y coordinate is the % difference between corresponding output records.
"""
PLOT_PRECISION = 100
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
outputSize = len(outputs)
for i in range(0,outputSize):
for j in range(i+1,outputSize):
in1 = outputs[i]
in2 = outputs[j]
dist = (abs(in1-in2) > 0.1)
intDist = int(dist.sum()/2+0.1)
orig1 = origData[i]
orig2 = origData[j]
origDist = (abs(orig1-orig2) > 0.1)
intOrigDist = int(origDist.sum()/2+0.1)
if intDist < 2 and intOrigDist > 10:
        print 'Elements %d,%d have very small SP distance: %d' % (i, j, intDist)
print 'Input elements distance is %d' % intOrigDist
x = int(PLOT_PRECISION*intDist/40.0)
y = int(PLOT_PRECISION*intOrigDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
# Add some elements for the scale drawing
distribMatrix[4, 50] = 3
distribMatrix[4, 52] = 4
distribMatrix[4, 54] = 5
distribMatrix[4, 56] = 6
distribMatrix[4, 58] = 7
distribMatrix[4, 60] = 8
distribMatrix[4, 62] = 9
distribMatrix[4, 64] = 10
return distribMatrix
############################################################################
def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
""" Generates a set of input record
Params:
numRecords - how many records to generate
elemSize - the size of each record (num 0s or 1s)
numSet - how many 1s in each record
Returns: a list of inputs
"""
inputs = []
for _ in xrange(numRecords):
input = np.zeros(elemSize, dtype=realDType)
for _ in range(0,numSet):
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
while abs(input.sum() - numSet) > 0.1:
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
inputs.append(input)
return inputs
############################################################################
def appendInputWithSimilarValues(inputs):
""" Creates an 'one-off' record for each record in the inputs. Appends new
records to the same inputs list.
"""
numInputs = len(inputs)
for i in xrange(numInputs):
input = inputs[i]
for j in xrange(len(input)-1):
if input[j] == 1 and input[j+1] == 0:
newInput = copy.deepcopy(input)
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
break
############################################################################
def appendInputWithNSimilarValues(inputs, numNear = 10):
""" Creates a neighboring record for each record in the inputs and adds
new records at the end of the inputs list
"""
numInputs = len(inputs)
skipOne = False
for i in xrange(numInputs):
input = inputs[i]
numChanged = 0
newInput = copy.deepcopy(input)
for j in xrange(len(input)-1):
if skipOne:
skipOne = False
continue
if input[j] == 1 and input[j+1] == 0:
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
newInput = copy.deepcopy(newInput)
#print input
#print newInput
numChanged += 1
skipOne = True
if numChanged == numNear:
break
############################################################################
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal
############################################################################
def getRandomWithMods(inputSpace, maxChanges):
""" Returns a random selection from the inputSpace with randomly modified
up to maxChanges number of bits.
"""
size = len(inputSpace)
ind = np.random.random_integers(0, size-1, 1)[0]
value = copy.deepcopy(inputSpace[ind])
if maxChanges == 0:
return value
return modifyBits(value, maxChanges)
############################################################################
def testSP():
""" Run a SP test
"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 2
wantPlot = True
poolPct = 0.5
itr = 1
doLearn = True
while numRecords < 3:
# Setup a SP
sp = FDRCSpatial2.FDRCSpatial2(
coincidencesShape=(2048, 1),
inputShape = (1, elemSize),
inputBorder = elemSize/2-1,
coincInputRadius = elemSize/2,
numActivePerInhArea = 40,
spVerbosity = 0,
stimulusThreshold = 0,
seed = 1,
coincInputPoolPct = poolPct,
globalInhibition = True
)
# Generate inputs using rand()
inputs = generateRandomInput(numRecords, elemSize, numSet)
if addNear:
# Append similar entries (distance of 1)
appendInputWithNSimilarValues(inputs, 42)
inputSize = len(inputs)
print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)
# Run a number of iterations, with learning on or off,
# retrieve results from the last iteration only
outputs = np.zeros((inputSize,2048))
numIter = 1
if doLearn:
numIter = itr
for iter in xrange(numIter):
for i in xrange(inputSize):
time.sleep(0.001)
if iter == numIter - 1:
outputs[i] = sp.compute(inputs[i], learn=doLearn, infer=False)
#print outputs[i].sum(), outputs[i]
else:
sp.compute(inputs[i], learn=doLearn, infer=False)
# Build a plot from the generated input and output and display it
distribMatrix = generatePlot(outputs, inputs)
# If we don't want a plot, just continue
if wantPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
if doLearn:
        title += ', learning ON'
else:
title += ', learning OFF'
title += ', inputs = %d' % len(inputs)
title += ', iterations = %d' % numIter
title += ', poolPct =%f' % poolPct
plt.suptitle(title, fontsize=12)
plt.show()
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
#plt.clf()
numRecords += 1
return
############################################################################
def testSPNew():
""" New version of the test"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 1000
wantPlot = False
poolPct = 0.5
itr = 5
pattern = [60, 1000]
doLearn = True
start = 1
learnIter = 0
noLearnIter = 0
numLearns = 0
numTests = 0
numIter = 1
numGroups = 1000
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = generateRandomInput(numGroups, elemSize, numSet)
# Setup a SP
sp = FDRCSpatial2.FDRCSpatial2(
coincidencesShape=(2048, 1),
inputShape = (1, elemSize),
inputBorder = elemSize/2-1,
coincInputRadius = elemSize/2,
numActivePerInhArea = 40,
spVerbosity = 0,
stimulusThreshold = 0,
synPermConnected = 0.12,
seed = 1,
coincInputPoolPct = poolPct,
globalInhibition = True
)
cleanPlot = False
for i in xrange(numRecords):
input1 = getRandomWithMods(inputs, 4)
if i % 2 == 0:
input2 = getRandomWithMods(inputs, 4)
else:
input2 = input1.copy()
input2 = modifyBits(input2, 21)
inDist = (abs(input1-input2) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
#print intInDist
if start == 0:
doLearn = True
learnIter += 1
if learnIter == pattern[start]:
numLearns += 1
start = 1
noLearnIter = 0
elif start == 1:
doLearn = False
noLearnIter += 1
if noLearnIter == pattern[start]:
numTests += 1
start = 0
learnIter = 0
cleanPlot = True
output1 = sp.compute(input1, learn=doLearn, infer=False).copy()
output2 = sp.compute(input2, learn=doLearn, infer=False).copy()
time.sleep(0.001)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
if not doLearn and intOutDist < 2 and intInDist > 10:
"""
sp.spVerbosity = 10
sp.compute(input1, learn=doLearn, infer=False)
sp.compute(input2, learn=doLearn, infer=False)
sp.spVerbosity = 0
print 'Elements has very small SP distance: %d' % intOutDist
print output1.nonzero()
print output2.nonzero()
print sp._firingBoostFactors[output1.nonzero()[0]]
print sp._synPermBoostFactors[output1.nonzero()[0]]
print 'Input elements distance is %d' % intInDist
print input1.nonzero()
print input2.nonzero()
sys.stdin.readline()
"""
if not doLearn:
x = int(PLOT_PRECISION*intOutDist/40.0)
y = int(PLOT_PRECISION*intInDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
#print i
# If we don't want a plot, just continue
if wantPlot and cleanPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
#if doLearn:
# title += ', leaning ON'
#else:
# title += ', learning OFF'
title += ', learn sets = %d' % numLearns
title += ', test sets = %d' % numTests
title += ', iter = %d' % numIter
title += ', groups = %d' % numGroups
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.show()
plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
cleanPlot = False
############################################################################
def testSPFile():
""" Run test on the data file - the file has records previously encoded.
"""
spSize = 2048
spSet = 40
poolPct = 0.5
pattern = [50, 1000]
doLearn = True
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = []
#file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
#elemSize = 400
#numSet = 42
#file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
#elemSize = 499
#numSet = 7
outdir = '~/Desktop/ExperimentResults/Basil100x21'
inputFile = outdir+'.csv'
file = open(inputFile, 'rb')
elemSize = 100
numSet = 21
reader = csv.reader(file)
for row in reader:
input = np.array(map(float, row), dtype=realDType)
if len(input.nonzero()[0]) != numSet:
continue
inputs.append(input.copy())
file.close()
# Setup a SP
sp = FDRCSpatial2.FDRCSpatial2(
coincidencesShape=(spSize, 1),
inputShape = (1, elemSize),
inputBorder = (elemSize-1)/2,
coincInputRadius = elemSize/2,
numActivePerInhArea = spSet,
spVerbosity = 0,
stimulusThreshold = 0,
synPermConnected = 0.10,
seed = 1,
coincInputPoolPct = poolPct,
globalInhibition = True
)
cleanPlot = False
doLearn = False
print 'Finished reading file, inputs/outputs to process =', len(inputs)
size = len(inputs)
for iter in xrange(100):
print 'Iteration', iter
# Learn
if iter != 0:
for learnRecs in xrange(pattern[0]):
ind = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[ind], learn=True, infer=False)
# Test
for _ in xrange(pattern[1]):
rand1 = np.random.random_integers(0, size-1, 1)[0]
rand2 = np.random.random_integers(0, size-1, 1)[0]
output1 = sp.compute(inputs[rand1], learn=False, infer=True).copy()
output2 = sp.compute(inputs[rand2], learn=False, infer=True).copy()
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
if intInDist != numSet or intOutDist != spSet:
print rand1, rand2, '-', intInDist, intOutDist
x = int(PLOT_PRECISION*intOutDist/spSet)
y = int(PLOT_PRECISION*intInDist/numSet)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
if True:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))
title = 'SP distribution'
title += ', iter = %d' % iter
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
plt.savefig(os.path.join(outdir, '%s' % iter))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
############################################################################
if __name__ == '__main__':
np.random.seed(83)
#testSP()
#testSPNew()
testSPFile()
| gpl-3.0 |
madjelan/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
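# Illustrative usage sketch (not one of the original tests, hence no "test_"
# prefix so test runners skip it): query a small KDTree directly and check
# the distances against the brute-force helper above. Random toy data only.
def _kd_tree_usage_sketch():
    rng = np.random.RandomState(42)
    X = rng.random_sample((20, 3))
    Y = rng.random_sample((5, 3))
    tree = KDTree(X, leaf_size=2)
    dist, ind = tree.query(Y, k=3)
    dist_brute, _ = brute_force_neighbors(X, Y, k=3, metric='euclidean')
    # Compare distances only; ties can make the index order differ.
    assert_array_almost_equal(dist, dist_brute)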
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
michigraber/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
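# Short usage sketch (added for illustration, mirroring the docstring above):
# for a 2x2 image, img_to_graph returns a 4x4 sparse adjacency matrix whose
# off-diagonal weights are the absolute gradients between 4-connected pixels
# and whose diagonal holds the pixel values themselves.
def _img_to_graph_sketch():
    img = np.array([[0., 1.],
                    [2., 3.]])
    graph = img_to_graph(img)
    # graph is a scipy.sparse matrix; densify only for tiny examples.
    return graph.toarray()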
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
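# Illustration of the point made in the docstring above (added sketch):
# extract_patches returns a strided *view*, so no data is copied until the
# final reshape into an explicit list of patches.
def _extract_patches_sketch():
    arr = np.arange(16.).reshape(4, 4)
    patches = extract_patches(arr, patch_shape=(2, 2))
    # 3x3 grid of overlapping 2x2 patches, still backed by arr's memory.
    assert patches.shape == (3, 3, 2, 2)
    patch_list = patches.reshape(-1, 2, 2)  # this reshape copies
    return patch_list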
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
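# Hedged round-trip sketch (illustrative only): extracting every overlapping
# patch of a float image and averaging them back should reproduce the input.
# Defined but not executed at import time.
def _reconstruct_round_trip_demo():
    """Sketch: extract_patches_2d followed by reconstruct_from_patches_2d."""
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))           # shape (9, 2, 2)
    restored = reconstruct_from_patches_2d(patches, (4, 4))
    return np.allclose(image, restored)                   # expected: True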
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
agundy/BusinessCardReader | cardscan/utils.py | 1 | 1757 | import numpy as np
import cv2
from matplotlib import pyplot as plt
import glob
import random as rand
def readImage(imgName, grayscale=False):
'''Read an image with OpenCV; color images are returned in OpenCV's native BGR order (display() flips to RGB for plotting)'''
if grayscale:
img = cv2.imread(imgName)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
else:
img = cv2.imread(imgName)
img = np.array(img[::,::,::])
return img
def getImages(path, limit=20):
'''
INPUT: path: String of the folder
i.e. '../example_imgs'
OUTPUT imgs: list of (name, image array) tuples
i.e. [('img1.jpg', <ndarray>)]
'''
path = './' + path + '/*'
imageNames = glob.glob(path)
imageNames = sorted(imageNames)
imgs = []
for i, imageName in enumerate(imageNames):
if i >= limit and limit != -1:
break
imgPath = imageName.split('/')
photoName = imgPath[len(imgPath)-1]
img = readImage(imageName)
print "Opening image %s" %imageName
imgs.append((photoName, img))
return imgs
def display(images):
'''
Takes a list of [(name, image, grayscaleImage, (keypoints, descriptor))]
and displays them in a grid two wide
'''
# Calculate the height of the plot grid; this is the hundreds digit
size = int(np.ceil(len(images)/2.))*100
# Number of images across is the tens digit
size += 20
count = 1
plt.gray()
for imgName, img in images:
if len(img.shape) == 3:
img = img[::,::,::-1]
plt.subplot(size + count)
plt.imshow(img)
plt.title(imgName)
count += 1
plt.show()
if __name__ == "__main__":
imgs = getImages('../../stanford_business_cards/scans', -1)
display(imgs[:5])
| mit |
cbertinato/pandas | pandas/tests/sparse/test_format.py | 1 | 5736 | import warnings
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas as pd
from pandas import option_context
import pandas.util.testing as tm
use_32bit_repr = is_platform_windows() or is_platform_32bit()
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseSeriesFormatting:
@property
def dtype_format_for_platform(self):
return '' if use_32bit_repr else ', dtype=int32'
def test_sparse_max_row(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan]).to_sparse()
result = repr(s)
dfm = self.dtype_format_for_platform
exp = ("0 1.0\n1 NaN\n2 NaN\n3 3.0\n"
"4 NaN\ndtype: Sparse[float64, nan]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dfm))
assert result == exp
def test_sparse_max_row_truncated(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan]).to_sparse()
dfm = self.dtype_format_for_platform
with option_context("display.max_rows", 3):
# GH 10560
result = repr(s)
exp = ("0 1.0\n ... \n4 NaN\n"
"Length: 5, dtype: Sparse[float64, nan]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dfm))
assert result == exp
def test_sparse_mi_max_row(self):
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1), ('C', 2)])
s = pd.Series([1, np.nan, np.nan, 3, np.nan, np.nan],
index=idx).to_sparse()
result = repr(s)
dfm = self.dtype_format_for_platform
exp = ("A 0 1.0\n 1 NaN\nB 0 NaN\n"
"C 0 3.0\n 1 NaN\n 2 NaN\n"
"dtype: Sparse[float64, nan]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dfm))
assert result == exp
with option_context("display.max_rows", 3,
"display.show_dimensions", False):
# GH 13144
result = repr(s)
exp = ("A 0 1.0\n ... \nC 2 NaN\n"
"dtype: Sparse[float64, nan]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dfm))
assert result == exp
def test_sparse_bool(self):
# GH 13110
s = pd.SparseSeries([True, False, False, True, False, False],
fill_value=False)
result = repr(s)
dtype = '' if use_32bit_repr else ', dtype=int32'
exp = ("0 True\n1 False\n2 False\n"
"3 True\n4 False\n5 False\n"
"dtype: Sparse[bool, False]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dtype))
assert result == exp
with option_context("display.max_rows", 3):
result = repr(s)
exp = ("0 True\n ... \n5 False\n"
"Length: 6, dtype: Sparse[bool, False]\nBlockIndex\n"
"Block locations: array([0, 3]{0})\n"
"Block lengths: array([1, 1]{0})".format(dtype))
assert result == exp
def test_sparse_int(self):
# GH 13110
s = pd.SparseSeries([0, 1, 0, 0, 1, 0], fill_value=False)
result = repr(s)
dtype = '' if use_32bit_repr else ', dtype=int32'
exp = ("0 0\n1 1\n2 0\n3 0\n4 1\n"
"5 0\ndtype: Sparse[int64, False]\nBlockIndex\n"
"Block locations: array([1, 4]{0})\n"
"Block lengths: array([1, 1]{0})".format(dtype))
assert result == exp
with option_context("display.max_rows", 3,
"display.show_dimensions", False):
result = repr(s)
exp = ("0 0\n ..\n5 0\n"
"dtype: Sparse[int64, False]\nBlockIndex\n"
"Block locations: array([1, 4]{0})\n"
"Block lengths: array([1, 1]{0})".format(dtype))
assert result == exp
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameFormatting:
def test_sparse_frame(self):
# GH 13110
df = pd.DataFrame({'A': [True, False, True, False, True],
'B': [True, False, True, False, True],
'C': [0, 0, 3, 0, 5],
'D': [np.nan, np.nan, np.nan, 1, 2]})
sparse = df.to_sparse()
assert repr(sparse) == repr(df)
with option_context("display.max_rows", 3):
assert repr(sparse) == repr(df)
def test_sparse_repr_after_set(self):
# GH 15488
sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
res = sdf.copy()
# Ignore the warning
with pd.option_context('mode.chained_assignment', None):
sdf[0][1] = 2 # This line triggers the bug
repr(sdf)
tm.assert_sp_frame_equal(sdf, res)
def test_repr_no_warning():
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
df = pd.SparseDataFrame({"A": [1, 2]})
s = df['A']
with tm.assert_produces_warning(None):
repr(df)
repr(s)
| bsd-3-clause |
toobaz/grimm | grimm_lib/matplotlib_future/backend_gtk3.py | 1 | 36285 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
from gi.repository import Gtk, Gdk, GObject
except ImportError:
raise ImportError("GTK3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_micro_version(), Gtk.get_minor_version())
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if Gtk.main_level() == 0:
Gtk.main()
show = Show()
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GObject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GObject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
self._idle_event_id = GObject.idle_add(self.idle_event)
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
GObject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
GObject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{}+{}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK3.%s' % fn_name()
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
self._need_redraw = True
if self.get_visible() and self.get_mapped():
self.queue_draw()
# do a synchronous draw (it's less efficient than an async draw,
# but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = GObject.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration(True)
Gdk.flush()
Gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
FigureCanvas = FigureCanvasGTK3
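# Hedged embedding sketch (not part of the backend API): a minimal example of
# placing a FigureCanvasGTK3 inside a plain Gtk.Window, assuming a working
# GTK3 display. Defined but not called, so importing the backend is unchanged.
def _embedding_demo():
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    win = Gtk.Window()
    win.set_default_size(400, 300)
    win.connect("destroy", lambda *args: Gtk.main_quit())
    canvas = FigureCanvas(fig)          # FigureCanvasGTK3
    win.add(canvas)
    win.show_all()
    Gtk.main()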
class FigureManagerGTK3(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The Gtk.Toolbar (gtk only)
vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
window : The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK3.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True, 0)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False, 0)
size_request = self.toolbar.size_request()
h += size_request.height
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK3.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
Gtk.main_level() >= 1:
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3 (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
#self.canvas.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = Gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class NavigationToolbar(Gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (Gtk.DrawingArea)
win - the Gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
Gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
Gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
Gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
Gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
Gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
Gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
Gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
Gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
Gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK3 instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
GObject.GObject.__init__(self)
self.canvas = canvas
# Note: Gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(Gtk.ToolbarStyle.ICONS)
self._create_toolitems()
self.update = self._update
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
self.show_all()
self.update()
def _create_toolitems(self):
iconSize = Gtk.IconSize.SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
image = Gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = Gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip_text(
'Select axes that controls affect')
align = Gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = Gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default would be to pop the menu up at the current mouse position.
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height()
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
Gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update(self):
# called by __init__() and FigureManagerGTK3
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = Gtk.Menu()
self.itemAll = Gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = Gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = Gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
self._need_redraw = True
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
def save_figure(self, *args):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import Gtk.glade
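# NOTE: 'Gtk.glade' is a GTK2-era (pygtk/libglade) dependency; under the
# gi.repository GTK3 bindings used by this backend this import fails, so
# DialogLineprops is effectively legacy code carried over from the GTK2 backend.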
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = Gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = Gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = Gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called when the markerface color button is clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
try:
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
| gpl-3.0 |
cython-testbed/pandas | pandas/tests/scalar/timedelta/test_formats.py | 9 | 1068 | # -*- coding: utf-8 -*-
import pytest
from pandas import Timedelta
@pytest.mark.parametrize('td, expected_repr', [
(Timedelta(10, unit='d'), "Timedelta('10 days 00:00:00')"),
(Timedelta(10, unit='s'), "Timedelta('0 days 00:00:10')"),
(Timedelta(10, unit='ms'), "Timedelta('0 days 00:00:00.010000')"),
(Timedelta(-10, unit='ms'), "Timedelta('-1 days +23:59:59.990000')")])
def test_repr(td, expected_repr):
assert repr(td) == expected_repr
@pytest.mark.parametrize('td, expected_iso', [
(Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10,
nanoseconds=12), 'P6DT0H50M3.010010012S'),
(Timedelta(days=4, hours=12, minutes=30, seconds=5), 'P4DT12H30M5S'),
(Timedelta(nanoseconds=123), 'P0DT0H0M0.000000123S'),
# trim nano
(Timedelta(microseconds=10), 'P0DT0H0M0.00001S'),
# trim micro
(Timedelta(milliseconds=1), 'P0DT0H0M0.001S'),
# don't strip every 0
(Timedelta(minutes=1), 'P0DT0H1M0S')])
def test_isoformat(td, expected_iso):
assert td.isoformat() == expected_iso
| bsd-3-clause |
JessMcintosh/SonoGestures | CrossValidateSVM.py | 1 | 7789 | import serial
import math
import time
import datetime
import sys
import getopt
import os
import subprocess
import extract
import numpy as np
import itertools
from GraphConfusionMatrix import plot_confusion_matrix
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
#from svmutil import *
from time import sleep
gesturesAll = ('thumb','index','middle','ring','fist','point','call','gun','flex','adduct')
#gesturesFingers = ('Thumb', 'Index', 'Middle', 'Ring', 'Pinky')
#gesturesWrist = ('Ext', 'Flex', 'UDev', 'RDev', 'Pronate', 'Supinate')
#gestures = [gesturesAll, gesturesFingers, gesturesWrist]
gestures = [gesturesAll]
#trainingDirAll = 'TrainingData'
#trainingDirEMG = 'TrainingDataEMG'
#trainingDirFSR = 'TrainingDataFSR'
#trainingDirs = [trainingDirAll, trainingDirEMG, trainingDirFSR]
def cross_validate(gestures, originalPath):
parentPath = os.getcwd()
#os.chdir(trainingDir)
features = []
for i in os.listdir(os.getcwd()):
if i in gestures:
#print i
os.chdir(parentPath + "/" + i)
for dataFile in os.listdir(os.getcwd()):
#print dataFile
realpath = os.path.realpath(dataFile)
#print realpath
startfeature = time.time()
result = extract.readFeatures(realpath)
endfeature = time.time()
featuretime = endfeature - startfeature
features.append(result)
features_scaled = (preprocessing.scale(features)).tolist()
#print np.round(features_scaled, 2)
#print np.round(features, 1)
os.chdir(parentPath)
predictions = []
cum_rate = 0.0
# loop through 10 times
for i in range(10):
#print 'fold' , i
# select subset of features for the fold
count = 0
trainingSet = []
testingSet = []
trainingLabels = []
testingLabels = []
currentGesture = -1
#for i in range(len(features)):
# print features[i]
#print '\n\n\n\n'
#for feature_vector in features:
# print feature_vector
for feature_vector in features_scaled:
if count%10 == 0:
currentGesture += 1
if count%10 == i:
testingSet.append(feature_vector)
testingLabels.append(currentGesture)
else:
trainingSet.append(feature_vector)
trainingLabels.append(currentGesture)
count += 1
# train the data on the new subset
# iterate through parameters
C_values = [1, 2, 8, 32, 128, 512, 2048, 8192]
gamma_values = [0.001,0.01,0.05,0.1,0.3,0.5,0.7]
#C_values = [2]
#gamma_values = [0.5]
best_C = 0
best_G = 0
best_percentage = 0
#clf = svm.SVC(C=2.0,gamma=0.5)
C_range = np.logspace(-2, 10, num=13, base=2)
gamma_range = np.logspace(-5, 1, num=7, base=10)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(trainingLabels, n_iter=3, test_size=0.31, random_state=42)
grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)
grid.fit(trainingSet, trainingLabels)
#C_range = np.logspace(-1, 1, num=2, base=2)
#gamma_range = np.logspace(-1, 1, num=2, base=10)
#param_grid = dict(gamma=gamma_range, C=C_range)
#cv = StratifiedShuffleSplit(trainingLabels, n_iter=1, test_size=0.11, random_state=42)
#grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)
#grid.fit(trainingSet, trainingLabels)
best_C = grid.best_params_['C']
best_G = grid.best_params_['gamma']
#print("The best parameters are %s with a score of %0.2f"
# % (grid.best_params_, grid.best_score_))
#start = time.time()
clf = svm.SVC(C=best_C,gamma=best_G)
clfoutput = clf.fit(trainingSet, trainingLabels)
result = clf.predict(testingSet)
#end = time.time()
#print end-start + featuretime
predictions.append(result.tolist())
num_correct = 0
for j,k in zip(result,testingLabels):
if j == k:
num_correct += 1
percentage = (float(num_correct) * 100.0) / float(len(gestures))
#print 'percentage: ' , percentage
best_percentage = percentage
# for params in itertools.product(C_values, gamma_values):
#
# clf = svm.SVC(C=params[0],gamma=params[1])
# clfoutput = clf.fit(trainingSet, trainingLabels)
## classify
# result = clf.predict(testingSet)
# predictions.append(result.tolist())
#
# num_correct = 0
# for j,k in zip(result,testingLabels):
# if j == k:
# num_correct += 1
#
#
# percentage = (float(num_correct) * 100.0) / float(len(gestures))
# if percentage >= best_percentage:
# best_percentage = percentage
# best_C = params[0]
# best_G = params[1]
#
#print params
#print percentage
cum_rate += best_percentage
#print np.round(percentage,2), '%'
#print 'best percentage: ', best_percentage
#print 'best C: ', best_C
#print 'best G: ', best_G
#print result
#prob = svm_problem(trainingLabels, trainingSet)
#param = svm_parameter('-s 0 -t 2 -c 2 -g 0.5')
#param = svm_parameter('-s 0 -t 2 -c 2 -g 0.5')
#m = svm_train(prob, param)
#p_label, p_acc, p_val = svm_predict(testingLabels, testingSet, m)
#print p_label
#print p_acc
#print p_val
#print predictions
rate = cum_rate / 10.0
print np.round(rate,2), '%'
#print np.round(rate,3)
linear_pred = []
linear_true = []
for i in predictions:
count = 0
for j in i:
linear_pred.append(j)
linear_true.append(count)
count += 1
#print linear_pred
#print linear_true
c_matrix = confusion_matrix(linear_true, linear_pred)
#print c_matrix
#plot_confusion_matrix(c_matrix,"confusionmatrix")
return rate, c_matrix
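# Hedged illustration (not called anywhere in this script): the manual fold
# construction above assumes each gesture contributes 10 consecutive feature
# vectors, so repetition k of every gesture lands in the test set of fold k.
def _fold_indices(n_gestures, n_repetitions=10, fold=0):
    """Return (train_idx, test_idx) mirroring the count % 10 == fold split."""
    test_idx = [g * n_repetitions + fold for g in range(n_gestures)]
    train_idx = [i for i in range(n_gestures * n_repetitions)
                 if i not in test_idx]
    return train_idx, test_idx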
def validate_participant(directory):
cv_rates = []
c_matrices = []
originalWorkingPath = os.getcwd()
#os.chdir(os.path.join(directory, "features/"))
os.chdir(os.path.abspath(directory))
#print 'validating:', os.path.abspath(directory)
for r in gestures:
#print r[1]
cv_rate, c_matrix = cross_validate(r, originalWorkingPath)
cv_rates.append(cv_rate)
c_matrices.append(c_matrix)
#print(np.round(cv_rate,2))
os.chdir(originalWorkingPath)
#return cv_rates, c_matrices
return cv_rate, c_matrix
if __name__ == '__main__':
#cv_rate, c_matrix = cross_validate(gesturesAll, 'TrainingData')
#print np.round(cv_rate,2), '%'
#print c_matrix
#for r in itertools.product(gestures, trainingDirs):
# #print r
# cv_rate, c_matrix = cross_validate(r[0], r[1])
# print np.round(cv_rate,2), '%'
# #print c_matrix
#all_c_matrices = []
#sum_c_matrices = []
#for i in range (2,4):
#dirs = []
#dirs.append("/home/nappy/Drive/SonicGesturesData/08-03-2016/dpa/")
#dirs.append("/home/nappy/Drive/SonicGesturesData/08-03-2016/lpa/")
#dirs.append("/home/nappy/Drive/SonicGesturesData/08-03-2016/tda/")
#dirs.append("/home/nappy/Drive/SonicGesturesData/08-03-2016/tpa/")
#dirs.append("/home/nappy/Drive/SonicGesturesData/08-03-2016/tpp/")
#cv_rates, c_matrices = validate_participant(dirs[0])
#cv_rates, c_matrices = validate_participant(dirs[1])
#cv_rates, c_matrices = validate_participant(dirs[2])
#cv_rates, c_matrices = validate_participant(dirs[3])
#cv_rates, c_matrices = validate_participant(dirs[4])
cv_rates, c_matrices = validate_participant(sys.argv[1])
#all_c_matrices.append(c_matrices)
# FOR CONFUSION MATRICES
#first = True
#for idx, n in enumerate(all_c_matrices):
# if first:
# sum_c_matrices = n
# first = False
# else:
# for idy, j in enumerate(sum_c_matrices):
# sum_c_matrices[idy] = sum_c_matrices[idy] + all_c_matrices[idx][idy]
#for idx, n in enumerate(zip(sum_c_matrices, i)):
# print idx
# print n
#
# n[0] = n[0] + n[1]
#sum_c_matrices[:] = [((x * 100.0) / 120.0) for x in sum_c_matrices]
#for x in sum_c_matrices:
# for y in x:
# for idx, z in enumerate(y):
# if idx != len(y) - 1:
# print '%d,' % z,
# else:
# print '%d' % z
#print all_c_matrices
| gpl-3.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps18/src/dataset.py | 55 | 78980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import locale
import socket
import tarfile
import urllib2
import zipfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from files import *
from general import *
from ui import *
class Dataset(object):
"""Dataset base class.
Specific dataset classes inherit from this class and reimplement only the methods they need.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield meta items while they are valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
except (urllib2.URLError, socket.timeout), e:
try:
fo.close()
except:
raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(
title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
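    # Typical workflow sketch (class name and data path are illustrative, see the
    # dataset classes defined below):
    #
    #     dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #
    # fetch() only downloads and extracts the packages when the cached file-list
    # hash is missing or stale, then rebuilds the meta file via on_after_extract().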
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
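    # Cross-validation sketch built on folds()/train()/test() (illustrative only):
    #
    #     for fold in dataset.folds(mode='folds'):
    #         train_items = dataset.train(fold)   # labelled items for this fold
    #         test_items = dataset.test(fold)     # unlabelled file list for this fold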
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
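    # Path helper sketch (assuming local_path is '/data/my-dataset'):
    #
    #     rel = dataset.absolute_to_relative('/data/my-dataset/audio/a001.wav')  # -> 'audio/a001.wav'
    #     absolute = dataset.relative_to_absolute_path(rel)                      # absolute path again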
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
            for fold in xrange(1, self.evaluation_folds + 1):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_train.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
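    # Access sketch: event lists can be requested per scene or over all scenes, e.g.
    #
    #     home_events = dataset.train(fold=1, scene_label='home')
    #     all_events = dataset.train(fold=1)   # concatenated over every scene label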
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                            if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                        with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
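    # Mapping sketch: 'c' -> 'child speech', 'v' -> 'video game/tv'; the codes 'S'
    # (silence/background) and 'U' (unidentifiable) are dropped later when the tag
    # list is built in on_after_extract().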
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print file
                        if tag not in ('S', 'U'):
                            tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(
self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
fold += 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
        if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
            # polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
            # polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
| mit |
ssaeger/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 26 | 3305 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels + 10), silhouette_score(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
| bsd-3-clause |
burakbayramli/kod | movrecom.py | 1 | 2307 | #
# Recommend movies based on the GroupLens ratings file
#
# https://grouplens.org/datasets/movielens/latest/
#
# Download the full file, unzip it in a known
# location, and update the d variable below
#
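# Example invocations (a sketch; assumes d below points at the extracted
# ml-latest folder):
#
#   python movrecom.py normal   # nearest-user recommendations via cosine similarity
#   python movrecom.py svd      # recommendations in a truncated-SVD latent space
#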
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import csr_matrix
import scipy.sparse.linalg
import pandas as pd, numpy as np
import os, sys, re
d = "/media/burak/3d1ece2f-6539-411b-bac2-589d57201626/home/burak/Downloads/ml-latest"
picks = {"Star Trek: First Contact (1996)": 5.0,
"Assassins (1995)": 5.0,
"Tombstone (1993)": 5.0
}
if len(sys.argv) < 2:
print ("Usage movrecom.py [normrec|svdrec]")
exit()
if sys.argv[1] == "normal":
ratings = pd.read_csv(d + "/ratings.csv")
utility_csr = csr_matrix((ratings.rating, (ratings.userId , ratings.movieId)))
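    # utility_csr is a sparse users x movies rating matrix: entry (u, m) holds user
    # u's rating of movie m and is implicitly zero where no rating exists.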
mov = pd.read_csv(d + "/movies.csv",index_col="title")['movieId'].to_dict()
tst = np.zeros((1,utility_csr.shape[1]))
for p in picks: tst[0,mov[p]] = picks[p]
similarities = cosine_similarity(utility_csr, tst)
m = np.argsort(similarities[:,0])
movi = pd.read_csv(d + "/movies.csv",index_col="movieId")['title'].to_dict()
res = {}
for idx in range(1,1000):
ii,jj = utility_csr[m[-idx],:].nonzero()
for j in jj:
r = utility_csr[m[-idx],:][0,j]
n = movi[j]
if n not in picks and r >= 4.0: res[n] = r
for x in res: print (x)
if sys.argv[1] == "svd":
ratings = pd.read_csv(d + "/ratings.csv")
utility_csr = csr_matrix((ratings.rating, (ratings.userId , ratings.movieId)))
mov = pd.read_csv(d + "/movies.csv",index_col="title")['movieId'].to_dict()
for p in picks: utility_csr[0,mov[p]] = picks[p]
A = scipy.sparse.linalg.svds(utility_csr, k=10)[0]
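    # A holds the left singular vectors (one k=10 latent-factor row per user); row 0
    # carries the hand-picked ratings injected above, so rows with high cosine
    # similarity to it belong to users with similar taste.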
similarities = cosine_similarity(A, A[0,:].reshape(1,10))
m = np.argsort(similarities[:,0])
movi = pd.read_csv(d + "/movies.csv",index_col="movieId")['title'].to_dict()
res = {}
for idx in range(1,100):
ii,jj = utility_csr[m[-idx],:].nonzero()
for j in jj:
r = utility_csr[m[-idx],:][0,j]
n = movi[j]
year = int(re.findall('(\d\d\d\d)', n)[0])
if n not in picks and r >= 4.0 and year>1990: res[n] = r
for x in res: print (x)
| gpl-3.0 |
ndingwall/scikit-learn | sklearn/datasets/_lfw.py | 11 | 19308 | """Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import dirname, join, exists, isdir
import logging
import numpy as np
import joblib
from joblib import Memory
from ._base import get_data_home, _fetch_remote, RemoteFileMetadata
from ..utils import Bunch
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import parse_version
logger = logging.getLogger(__name__)
# The original data can be found in:
# http://vis-www.cs.umass.edu/lfw/lfw.tgz
ARCHIVE = RemoteFileMetadata(
filename='lfw.tgz',
url='https://ndownloader.figshare.com/files/5976018',
checksum=('055f7d9c632d7370e6fb4afc7468d40f'
'970c34a80d4c6f50ffec63f5a8d536c0'))
# The original funneled data can be found in:
# http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz
FUNNELED_ARCHIVE = RemoteFileMetadata(
filename='lfw-funneled.tgz',
url='https://ndownloader.figshare.com/files/5976015',
checksum=('b47c8422c8cded889dc5a13418c4bc2a'
'bbda121092b3533a83306f90d900100a'))
# The original target data can be found in:
# http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt',
# http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt',
# http://vis-www.cs.umass.edu/lfw/pairs.txt',
TARGETS = (
RemoteFileMetadata(
filename='pairsDevTrain.txt',
url='https://ndownloader.figshare.com/files/5976012',
checksum=('1d454dada7dfeca0e7eab6f65dc4e97a'
'6312d44cf142207be28d688be92aabfa')),
RemoteFileMetadata(
filename='pairsDevTest.txt',
url='https://ndownloader.figshare.com/files/5976009',
checksum=('7cb06600ea8b2814ac26e946201cdb30'
'4296262aad67d046a16a7ec85d0ff87c')),
RemoteFileMetadata(
filename='pairs.txt',
url='https://ndownloader.figshare.com/files/5976006',
checksum=('ea42330c62c92989f9d7c03237ed5d59'
'1365e89b3e649747777b70e692dc1592')),
)
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if not exists(lfw_home):
makedirs(lfw_home)
for target in TARGETS:
target_filepath = join(lfw_home, target.filename)
if not exists(target_filepath):
if download_if_missing:
logger.info("Downloading LFW metadata: %s", target.url)
_fetch_remote(target, dirname=lfw_home)
else:
raise IOError("%s is missing" % target_filepath)
if funneled:
data_folder_path = join(lfw_home, "lfw_funneled")
archive = FUNNELED_ARCHIVE
else:
data_folder_path = join(lfw_home, "lfw")
archive = ARCHIVE
if not exists(data_folder_path):
archive_path = join(lfw_home, archive.filename)
if not exists(archive_path):
if download_if_missing:
logger.info("Downloading LFW data (~200MB): %s",
archive.url)
_fetch_remote(archive, dirname=lfw_home)
else:
raise IOError("%s is missing" % archive_path)
import tarfile
logger.debug("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# import PIL only when needed
from ..externals._pilutil import imread, imresize
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.debug("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
@_deprecate_positional_args
def fetch_lfw_people(*, data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True, return_X_y=False):
"""Load the Labeled Faces in the Wild (LFW) people dataset \
(classification).
Download it if necessary.
================= =======================
Classes 5749
Samples total 13233
Dimensionality 5828
Features real, between 0 and 255
================= =======================
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
funneled : bool, default=True
Download and use the funneled variant of the dataset.
resize : float, default=0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, default=0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : bool, default=False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
object. See below for more information about the `dataset.data` and
`dataset.target` object.
.. versionadded:: 0.20
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image
of original size 62 x 47 pixels.
Changing the ``slice_`` or resize parameters will change the
shape of the output.
images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_``
or resize parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each face image.
Those labels range from 0-5748 and correspond to the person IDs.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
lfw_home, data_folder_path = _check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.debug('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
if parse_version(joblib.__version__) < parse_version('0.12'):
# Deal with change of API in joblib
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
else:
m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
X = faces.reshape(len(faces), -1)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'lfw.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return X, target
# pack the results as a Bunch instance
return Bunch(data=X, images=faces,
target=target, target_names=target_names,
DESCR=fdescr)
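# A hypothetical usage sketch (not part of the original module): restrict the
# people dataset to frequently photographed subjects; the parameter values are
# illustrative only.
#
#     from sklearn.datasets import fetch_lfw_people
#     lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#     print(lfw.images.shape)      # (n_samples, height, width)
#     print(lfw.target_names[:5])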
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.decode().strip().split('\t') for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
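    # Each line is tab-separated: three fields ("name  img1  img2") describe a
    # matched pair of images of the same person, four fields
    # ("name1 img1 name2 img2") an unmatched pair (a layout inferred from the
    # branching below).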
target = np.zeros(n_pairs, dtype=int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@_deprecate_positional_args
def fetch_lfw_pairs(*, subset='train', data_home=None, funneled=True,
resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).
Download it if necessary.
================= =======================
Classes 2
Samples total 13233
Dimensionality 5828
Features real, between 0 and 255
================= =======================
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
Parameters
----------
subset : {'train', 'test', '10_folds'}, default='train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : str, default=None
Specify another download and cache folder for the datasets. By
default all scikit-learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : bool, default=True
Download and use the funneled variant of the dataset.
resize : float, default=0.5
        Ratio used to resize each face picture.
color : bool, default=False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images
of original size 62 x 47 pixels.
Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset``
Each row has 2 face images corresponding
to same or different person from the dataset
containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images.
The two label values being different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = _check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.debug('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
if parse_version(joblib.__version__) < parse_version('0.12'):
# Deal with change of API in joblib
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
else:
m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'lfw.rst')) as rst_file:
fdescr = rst_file.read()
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR=fdescr)
| bsd-3-clause |
Diyago/Machine-Learning-scripts | general studies/get feature importance.py | 1 | 4500 | from __future__ import division
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.svm import OneClassSVM
from sklearn.feature_selection import chi2
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
import random
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
# The iris dataset
df_train = pd.read_csv("C:\\Users\\SBT-Ashrapov-IR\\Desktop\\docs\\apps\\CustomerSatisfaction\\data\\train.csv")
df_test = pd.read_csv('C:\\Users\\SBT-Ashrapov-IR\\Desktop\\docs\\apps\\CustomerSatisfaction\\data\\test.csv')
remove = []
# drop constant (zero-variance) columns
for col in df_train.columns:
if df_train[col].std() == 0:
remove.append(col)
df_train.drop(remove, axis=1, inplace=True)
df_test.drop(remove, axis=1, inplace=True)
# normalize the feature columns (DataFrame.apply returns a new frame, so the
# result must be assigned back; TARGET is left untouched so the 0/1 checks
# further below keep working)
feature_cols = [c for c in df_train.columns if c != "TARGET"]
df_train[feature_cols] = df_train[feature_cols].apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))
df_test[feature_cols] = df_test[feature_cols].apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))
size = len(df_train.columns) -1
# remove duplicated columns
remove = []
listZeroes = []
remove3 = []
c = df_train.columns
k = 1
for i in range(len(c)-1):
v = df_train[c[i]].values
for j in range(i+1,len(c)):
if np.array_equal(v,df_train[c[j]].values):
remove.append(c[j])
df_train.drop(remove, axis=1, inplace=True)
df_test.drop(remove, axis=1, inplace=True)
for i in range(len(df_train)):
if df_train["TARGET"][i] == 0:
listZeroes.append(i)
if df_train["TARGET"][i] == 1:
remove3.append(i)
#print listZeroes
print len(df_train)
#print df_train.index
listZeroes = random.sample(listZeroes, len(df_train) - 3007- len(remove3))
remove3 = random.sample(remove3, len(remove3) - 3007)
listZeroes = listZeroes +remove3
df_ones = df_train.iloc[remove3]
for i in xrange(1, 24,1):
df_train = df_train.append(df_ones)
# df_train.drop(df_train.index[listZeroes],inplace=True)
X_full = df_train[df_train.columns[:-1]]   # every column except the trailing TARGET
y_full = df_train["TARGET"]                # the label column
test_frac = 0.10
count = len(X_full)
X = X_full.iloc[-int(count*test_frac):]
y = y_full.iloc[-int(count*test_frac):]
print "X", X.shape , "y", y.shape
lsvc = LinearSVC(C=0.01, penalty="l1", dual=False).fit(X, y)
model = SelectFromModel(lsvc, prefit=True)
X_new = model.transform(X)
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
# print "X_new.shape", X_new.shape
# print X_new
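# Illustrative addition (not part of the original script): map the features kept
# by the L1-based SelectFromModel back to their column names; `model` and `X`
# are the objects fitted above.
selected_columns = X.columns[model.get_support()]
print "Columns kept by the L1-based selector:", list(selected_columns)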
| apache-2.0 |
toastedcornflakes/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
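# Illustrative addition (not part of the original example): the introduction notes
# that a higher-purity subset of class B can be obtained by thresholding the
# decision scores. A minimal sketch using the fitted `bdt` and the data above;
# the cut-off of 0.5 is an arbitrary illustrative value.
score_cutoff = 0.5
selected = twoclass_output > score_cutoff
if selected.any():
    purity_B = (y[selected] == 1).mean()
    print("Kept %d samples with decision score > %.1f; class B purity: %.2f"
          % (selected.sum(), score_cutoff, purity_B))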
| bsd-3-clause |
kmspriyatham/symath | scipy/scipy/stats/distributions.py | 2 | 232477 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import math
import sys
import warnings
from scipy.lib.six import callable, string_types, get_method_function
from scipy.lib.six import exec_
from scipy.misc import comb, derivative
from scipy.misc.doccer import inherit_docstring_from
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import keyword
import re
import inspect
from numpy import all, where, arange, putmask, \
ravel, take, ones, sum, shape, product, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
from . import vonmises_cython
from ._tukeylambda_stats import tukeylambda_variance as _tlvar, \
tukeylambda_kurtosis as _tlkurt
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'pearson3', 'powerlaw', 'powerlognorm',
'powernorm', 'rdist', 'rayleigh', 'reciprocal', 'rice',
'recipinvgauss', 'semicircular', 'triang', 'truncexpon',
'truncnorm', 'tukeylambda', 'uniform', 'vonmises', 'wald',
'wrapcauchy', 'entropy', 'rv_discrete', 'binom', 'bernoulli',
'nbinom', 'geom', 'hypergeom', 'logser', 'poisson', 'planck',
'boltzmann', 'randint', 'zipf', 'dlaplace', 'skellam'
]
floatinfo = numpy.finfo(float)
eps = numpy.finfo(float).eps
gam = special.gamma
random = mtrand.random_sample
import types
from scipy.misc import doccer
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods':"""\nMethods\n-------\n""",
'parameters':"""\nParameters\n---------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
    Cumulative distribution function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
    Log of the cumulative distribution function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> [ %(shapes)s ] = [<Replace with reasonable values>]
>>> rv = %(name)s(%(shapes)s)
Display frozen pmf
>>> x = np.arange(0, np.minimum(rv.dist.b, 3))
>>> h = plt.vlines(x, 0, rv.pmf(x), lw=2)
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
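    # Descriptive note (added): return the n-th non-central moment E[X**n],
    # reconstructed from the mean ``mu``, variance ``mu2``, skew ``g1`` and
    # excess kurtosis ``g2`` when those are available, falling back to
    # ``moment_func(n, *args)`` otherwise.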
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
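# Illustrative note (not part of the original source): rv_frozen is what calling a
# distribution instance returns, e.g.
#
#     rv = norm(loc=1.0, scale=2.0)    # `norm` is instantiated later in this module
#     rv.cdf(0.0)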
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond == cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _construct_argparser(self, names_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically.
Modifies the calling class.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a comma-separated
list of shape parameters.
Otherwise inspects the call signatures of `names_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError('shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape arguments
shapes_list = []
for name in names_to_inspect:
# look for names in instance methods, then global namespace
# the latter is needed for rv_discrete with explicit `values`
try:
meth = get_method_function(getattr(self, name))
except:
meth = globals()[name]
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError('*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError('**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's possible
values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
        The value placed in result arrays wherever some argument
        restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
        cumulative distribution function
logcdf(x, <shape(s)>, loc=0, scale=1)
        log of the cumulative distribution function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
    cases when directly called.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
self.shapes = shapes
self._construct_argparser(names_to_inspect=['_pdf', '_cdf'],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self.vecfunc = vectorize(self._ppf_single_call, otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = vectorize(self._cdf_single_call, otypes='d')
self.veccdf.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return self.cdf(*(x, )+args)-q
def _ppf_single_call(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q,*args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q,*args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(asarray(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) # use correct _ppf for subclasses
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
# Central moments
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1 == cond1)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray,(q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
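    # Illustrative sketch, for a hypothetical continuous distribution `dist`:
    # the point-wise methods above are related by
    #   sf(x)  == 1 - cdf(x)
    #   isf(q) == ppf(1 - q)
    # and ppf inverts cdf on the interior of the support, so for q in (0, 1)
    # one would expect, up to numerical precision:
    #   >>> x = dist.ppf(q)
    #   >>> abs(dist.cdf(x) - q) < 1e-12    # True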
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
signature = inspect.getargspec(get_method_function(self._stats))
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) # (mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
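    # Illustrative sketch, for a hypothetical distribution `dist`:
    #   >>> m, v = dist.stats(moments='mv')
    #   >>> m, v, s, k = dist.stats(moments='mvsk')
    # where `k` is the Fisher (excess) kurtosis, i.e. 0 for a normal law.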
def moment(self, n, *args, **kwds):
"""
n'th order non-central moment of distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(get_method_function(self._stats))
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
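    # Illustrative sketch, for a hypothetical distribution `dist`: the first
    # two non-central moments relate to stats() by
    #   dist.moment(1) == mean
    #   dist.moment(2) == variance + mean**2
    # up to numerical precision.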
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return negative loglikelihood function,
i.e., - sum (log pdf(x, theta),axis=0)
where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(floatinfo.machar.xmax)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = list(range(Nargs))
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in zip(index, names):
if key in kwds:
fixedn.append(n)
args[n] = kwds[key]
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
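    # Illustrative usage sketch, for a hypothetical distribution `dist` and
    # sample `data`:
    #   >>> params = dist.fit(data)            # all parameters free
    #   >>> params = dist.fit(data, floc=0)    # location held fixed at 0
    # Fixed parameters (f0..fn, floc, fscale) are removed from the starting
    # vector by _reduce_func above, so the optimizer only varies the rest.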
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead."""
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return special.xlogy(val, val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
"""
args, loc, scale = self._parse_args(*args, **kwds)
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.5.1
if self.numargs == 0:
place(output,cond0,self._entropy()+log(scale))
else:
place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the distribution
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
            E[f(x)] = Integral(f(x) * dist.pdf(x), lbound, ubound)
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Argument (parameters) of the distribution.
lb, ub : scalar, optional
Lower and upper bound for integration. default is set to the support
of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale':scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
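# Illustrative sketch for expect(): with the default identity `func` and a
# symmetric distribution the integral reduces to the location parameter, e.g.
# for the `norm` instance defined further below one would expect approximately:
#   >>> norm.expect(loc=2.0, scale=3.0)      # ~2.0
#   >>> norm.expect(lambda x: x**2)          # ~1.0, the second moment of N(0, 1)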
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - special.smirnov(n, x)
def _ppf(self, q, n):
return special.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return special.log_ndtr(x)
def _norm_ppf(q):
return special.ndtri(q)
def _norm_sf(x):
return special.ndtr(-x)
def _norm_logsf(x):
return special.log_ndtr(-x)
def _norm_isf(q):
return -special.ndtri(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
%(example)s
"""
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
@inherit_docstring_from(rv_continuous)
def fit(self, data, **kwds):
"""%(super)s
This function (norm_gen.fit) uses explicit formulas for the maximum
likelihood estimation of the parameters, so the `optimizer` argument
is ignored.
"""
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
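# Illustrative checks (approximate values):
#   >>> norm.pdf(0.0)      # 1/sqrt(2*pi), ~0.3989
#   >>> norm.cdf(0.0)      # 0.5
#   >>> norm.ppf(0.975)    # ~1.96
# Note that norm_gen.fit above is analytic: it returns the sample mean and the
# biased (1/N) sample standard deviation instead of running an optimizer.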
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
    where ``Phi(a)`` is the normal CDF, ``x > 0``, and ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(example)s
"""
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = ("Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(distr=distr,
lower=lower, upper=upper),)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = special.psi(a + b)
func = s1 - n * (-psiab + special.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) *
(1-x)**(b-1),
for ``0 < x < 1``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = special.xlog1py(b-1.0, -x) + special.xlogy(a-1.0, x)
lPx -= special.betaln(a,b)
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
"""%(super)s
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.
"""
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = kwds.get('f0', None)
f1 = kwds.get('f1', None)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0,1].
data = (ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()), full_output=True)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = np.log(1 - data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(_beta_mle_ab, [a, b],
args=(len(data), s1, s2), full_output=True)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
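# Illustrative checks (approximate values):
#   >>> beta.pdf(0.5, 2, 2)              # 1.5, since B(2, 2) = 1/6
#   >>> beta.stats(2, 2, moments='m')    # mean a/(a+b) = 0.5
# When both floc and fscale are given, beta_gen.fit above skips the generic
# optimizer and solves the shape-parameter likelihood equations with fsolve.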
class betaprime_gen(rv_continuous):
"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
function (see `scipy.special.beta`).
%(example)s
"""
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return special.xlogy(a-1.0, x) - special.xlog1py(a+b, x) - special.betaln(a,b)
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x == 1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0)
* (b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime')
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
"""A Burr continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of `burr` with ``d = 1``
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
        return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 - 4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr')
#XXX: cf PR #2552
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
%(before_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
def _fitstart(self, data, args=None):
return (0, 1)
cauchy = cauchy_gen(name='cauchy')
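# Illustrative checks:
#   >>> cauchy.cdf(0.0)    # 0.5
#   >>> cauchy.ppf(0.75)   # 1.0, since tan(pi/4) == 1
# The mean and variance do not exist; _stats above reports inf/nan accordingly.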
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
Special cases of `chi` are:
        - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
        - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
        - ``chi(3, 0, scale)`` is equivalent to `maxwell`
%(example)s
"""
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
%(example)s
"""
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
# term1 = (df/2.-1)*log(x)
# term1[(df==2)*(x==0)] = 0
# avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
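# Illustrative checks:
#   >>> chi2.stats(4)          # mean = df = 4, variance = 2*df = 8
# and, consistent with the comment above, chi2 with `df` degrees of freedom
# coincides with gamma(df/2, loc=0, scale=2), so chi2.cdf(x, df) should match
# gamma.cdf(x, df/2, scale=2) for any x >= 0.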
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(example)s
"""
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
%(example)s
"""
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u >= 0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x > 0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x > 0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
%(example)s
"""
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u >= 0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q <= 0.5,2*q,2*q-1)
fac = pow(asarray(log(1.0/fac)),1.0/c)
return where(q > 0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = lambda * exp(- lambda*x)
for ``x >= 0``.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`expon` does not have shape parameters.
%(example)s
"""
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
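# Illustrative checks:
#   >>> expon.cdf(1.0)               # 1 - exp(-1), ~0.632
#   >>> expon.stats(moments='mv')    # mean 1.0, variance 1.0
# A rate parameter `lam` (hypothetical name) enters through scale = 1.0/lam,
# e.g. expon(scale=1.0/lam).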
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**asarray(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return (exm1c)**a
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b))
for ``x >= 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
xbm1 = x**(b-1.0)
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
return -expm1(-expm1(x**b))
def _sf(self, x, b):
return exp(-expm1(x**b))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Sanders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x,c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/asarray(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/asarray((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/asarray(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den / 4.0
g1 = 4*c*sqrt(11*c2+6.0)/np.power(den, 1.5)
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
        F.pdf(x, df1, df2) = df2**(df2/2) * df1**(df1/2) * x**(df1/2-1) /
                             ((df2 + df1*x)**((df1+df2)/2) * B(df1/2, df2/2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = asarray(dfd*1.0)
v1 = asarray(dfn*1.0)
mu = where(v2 > 2, v2 / asarray(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
%(example)s
"""
def _argcheck(self, c):
return (c >= 0)
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac +
pi*c*(fac*fac-1))
g1 /= pi*np.power(mu2, 1.5)
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
## Extreme Value Type II or Frechet distribution
## (defined in the Regress+ documentation as Extreme LB),
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2):
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
for ``c != 0``, and for ``x >= 0`` for all c,
and ``x < 1/abs(c)`` for ``c < 0``.
%(example)s
"""
def _argcheck(self, c):
c = asarray(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c == 0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,asarray(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,asarray(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0, name='genpareto')
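# Illustrative note: the support depends on the sign of c, because _argcheck
# above rewrites self.b:
#   c > 0  ->  x in [0, inf)
#   c < 0  ->  x in [0, 1/abs(c)]
#   >>> genpareto.cdf(1.0, 0.5)   # 1 - 1.5**(-2), ~0.556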
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a,b,c > 0``.
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
%(example)s
"""
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c) == inf, 0, 1)
def _pdf(self, x, c):
cx = c*x
logex2 = where((c == 0)*(x == x),0.0,log1p(-cx))
logpex2 = where((c == 0)*(x == x),-x,logex2/c)
pex2 = exp(logpex2)
# Handle special cases
logpdf = where((cx == 1) | (cx == -inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf, (c == 1) & (x == 1), 0.0)
return exp(logpdf)
def _cdf(self, x, c):
loglogcdf = where((c == 0)*(x == x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
x = -log(-log(q))
return where((c == 0)*(x == x),x,-expm1(-c*x)/c)
def _stats(self, c):
g = lambda n: gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = where(abs(c) < 1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c) < 1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0)
eps = 1e-14
gamk = where(abs(c) < eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c < -1.0,nan,-gamk)
v = where(c < -0.5,nan,g1**2.0*gam2k)
# skewness
sk1 = where(c < -1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = where(abs(c) <= eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
# kurtosis
ku1 = where(c < -1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c) <= (eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: special.digamma(x) - y
if y > -0.125:
x0 = exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range.  For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
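# Illustrative sanity check for _digammainv: it numerically inverts
# special.digamma for real positive arguments, so for a scalar y one would
# expect something like
#   >>> abs(special.digamma(_digammainv(0.25)) - 0.25) < 1e-8    # True
# which is exactly the property gamma_gen.fit below relies on.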
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`gamma` has a shape parameter `a` which needs to be set explicitly. For instance:
>>> from scipy.stats import gamma
>>> rv = gamma(3., loc = 0., scale = 2.)
    produces a frozen form of `gamma` with shape ``a = 3.``, ``loc = 0.``
and ``lambda = 1./scale = 1./2.``.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(example)s
"""
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return special.xlogy(a-1.0, x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _sf(self, x, a):
return special.gammaincc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
# The skewness of the gamma distribution is `4 / sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
f0 = kwds.get('f0', None)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
                #    log(a) - special.digamma(a) - log(xbar) + log(data).mean() = 0
s = log(xbar) - log(data).mean()
func = lambda a: log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# special.digamma(a) - log(data).mean() + log(fscale) = 0
c = log(data).mean() - log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
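# Illustrative checks (approximate values):
#   >>> gamma.stats(3.0)                          # mean = a = 3, variance = a = 3
#   >>> gamma.stats(3.0, scale=2.0, moments='m')  # a*scale = 6
# With floc fixed, gamma_gen.fit above avoids the generic optimizer and solves
# log(a) - digamma(a) = log(xbar) - mean(log(data)) for the shape parameter.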
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn('The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
# Override gamma_gen_fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x > 0``, ``a > 0``, and ``c != 0``.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c) * exp((c*a-1)*log(x)-x**c - gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond > 0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n == 1:
return 2*log(2)
if n == 2:
return pi*pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
`halfnorm` is a special case of `chi` with ``df == 1``.
%(example)s
"""
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(example)s
"""
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))``
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(example)s
"""
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
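# --- Illustrative sketch (not part of the original module) ---
# The note above states that `invgamma` is `gengamma` with ``c == -1``;
# a small, hypothetical numerical check of that identity:
def _invgamma_vs_gengamma_example(a=3.0):
    x = np.linspace(0.2, 5.0, 9)
    return np.allclose(invgamma.pdf(x, a), gengamma.pdf(x, a, -1.0))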
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
When `mu` is too small, evaluating the cumulative distribution function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
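# --- Illustrative sketch (not part of the original module) ---
# Quick numerical sanity check of the closed-form inverse-Gaussian CDF
# implemented above against direct integration of the pdf.  The helper
# name is hypothetical and purely illustrative.
def _invgauss_cdf_check(mu=0.5, x=1.2):
    closed_form = invgauss.cdf(x, mu)
    numeric, _ = integrate.quad(lambda s: invgauss.pdf(s, mu), 0.0, x)
    # The two agree closely for moderate `mu`; the docstring warns that
    # very small `mu` loses accuracy.
    return closed_form, numeric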
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),asarray(-1.0/c))
def _munp(self, n, c):
return special.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a,b > 0``, and ``phi`` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all real ``x`` and ``a``, and ``b > 0``; `phi` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(example)s
"""
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(example)s
"""
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy")
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(example)s
"""
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(example)s
"""
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha == 1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
ialpha = 1.0/alpha
aTH = alpha*TH
if beta == 0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(example)s
"""
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all real ``x`` and ``c > 0``.
%(example)s
"""
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = special.digamma(c)
var = special.polygamma(1, c)
skewness = special.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = special.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return -log(x)**2 / (2*s**2) + np.where(x == 0, 0, -log(s*x*sqrt(2*pi)))
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
If ``log(x)`` is normally distributed with mean ``mu`` and variance ``sigma**2``,
then ``x`` is log-normally distributed with shape parameter sigma and scale
parameter ``exp(mu)``.
%(example)s
"""
def _rvs(self, s):
return exp(s * mtrand.standard_normal(self._size))
def _pdf(self, x, s):
return exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(log(x) / s)
def _ppf(self, q, s):
return exp(s * _norm_ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + log(2*pi) + 2 * log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
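# --- Illustrative sketch (not part of the original module) ---
# The notes above relate the lognormal shape/scale parameters to the
# underlying normal: if log(x) ~ N(mu, sigma**2) then x is lognormal
# with shape ``s = sigma`` and ``scale = exp(mu)``.  Hypothetical
# numerical illustration of that mapping:
def _lognorm_parametrisation_example(mu_=0.4, sigma_=0.25):
    x = np.linspace(0.5, 3.0, 7)
    direct = lognorm.pdf(x, sigma_, scale=np.exp(mu_))
    # Same density written through the normal law of log(x) (Jacobian 1/x).
    via_normal = norm.pdf(np.log(x), loc=mu_, scale=sigma_) / x
    return np.allclose(direct, via_normal)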
class gilbrat_gen(rv_continuous):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(example)s
"""
def _rvs(self):
return exp(mtrand.standard_normal(self._size))
def _pdf(self, x):
return exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(log(x))
def _ppf(self, q):
return exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = sqrt(p)
mu2 = p * (p - 1)
g1 = sqrt((p - 1)) * (2 + p)
g2 = numpy.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * log(2 * pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
"""A Mielke Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _logpdf(self, x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a)
return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0)))
def _pdf(self, x, df, nc):
return np.exp(self._logpdf(x, df, nc))
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
/ (B(v1/2, v2/2) * gamma((v1+v2)/2))
for ``df1, df2, nc > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
# this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <= 4, inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
%(example)s
"""
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
def _pdf(self, x, df):
r = asarray(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
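# --- Illustrative sketch (not part of the original module) ---
# For large ``df`` the Student's t density approaches the standard
# normal; a hypothetical helper illustrating that limit numerically:
def _t_large_df_example(df=200):
    x = np.linspace(-3.0, 3.0, 7)
    # Maximum pointwise difference; shrinks as `df` grows.
    return np.max(np.abs(t.pdf(x, df) - norm.pdf(x)))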
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``.
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= asarray(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df * val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2
- nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) -
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 -
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 * (df-3) * (df-4)*val1**4 +
2**(6-2*df) * nc*nc*(df-2)*(df-4) *
(nc*nc*(2*df-7)-3)*pi*gam(df+1)**2 -
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 -
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract(mask,b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract(mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2 * (bt + 1.0) * sqrt(bt - 2.0) / ((bt - 3.0) * sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract(mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt) / \
polyval([1.0,-7.0,12.0,0.0],bt)
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
(loc=-1.0).
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax")
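# --- Illustrative sketch (not part of the original module) ---
# The notes above describe Lomax as a Pareto shifted by ``loc=-1``; a
# hypothetical numerical check of that relationship:
def _lomax_vs_pareto_example(c=2.5):
    x = np.linspace(0.0, 5.0, 11)
    return np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))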
class pearson3_gen(rv_continuous):
"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is::
pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
(beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
where::
beta = 2 / (skew * stddev)
alpha = (stddev * beta)**2
zeta = loc - alpha / beta
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, skew, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = self._preprocess([1], skew)
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*log(beta*(x - zeta)) + (a - 1)*log(x)
# - beta*(x - zeta) - x
# - gamln(alpha) - gamln(a)
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = self._preprocess(x, skew)
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = self._preprocess(x, skew)
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
ans, x, transx, skew, mask, invmask, beta, alpha, zeta = self._preprocess([0], skew)
if mask[0]:
return mtrand.standard_normal(self._size)
ans = mtrand.standard_gamma(alpha, self._size)/beta + zeta
if ans.size == 1:
return ans[0]
return ans
def _ppf(self, q, skew):
ans, q, transq, skew, mask, invmask, beta, alpha, zeta = self._preprocess(q, skew)
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = special.gammaincinv(alpha,q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a),
6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
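# --- Illustrative sketch (not part of the original module) ---
# Numerical check of the note above that `powerlaw` coincides with the
# beta law when beta's second shape parameter is 1.  This assumes the
# module-level `beta` instance defined earlier in this file.
def _powerlaw_vs_beta_example(a=1.8):
    x = np.linspace(0.01, 0.99, 9)
    return np.allclose(powerlaw.pdf(x, a), beta.pdf(x, a, 1.0))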
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
%(example)s
"""
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x) * \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0 - x**2), c / 2.0 - 1) / special.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / special.beta(0.5, c / 2.0)
res = 0.5 + term1 * special.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
return (1 - (n % 2)) * special.beta((n + 1.0) / 2, c / 2.0)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``x >= 0``.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(example)s
"""
def _rvs(self):
return chi.rvs(2, size=self._size)
def _pdf(self, r):
return r * exp(-0.5 * r**2)
def _cdf(self, r):
return 1 - exp(-0.5 * r**2)
def _ppf(self, q):
return sqrt(-2 * log(1 - q))
def _stats(self):
val = 4 - pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
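# --- Illustrative sketch (not part of the original module) ---
# The notes above state that `rayleigh` is `chi` with ``df == 2``; a
# hypothetical numerical check of that special case:
def _rayleigh_vs_chi_example():
    x = np.linspace(0.1, 4.0, 9)
    return np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))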
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, mu):
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5 * np.power((1.0-c+c*c), 1.5)), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
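# --- Illustrative sketch (not part of the original module) ---
# The notes above describe the (c, loc, scale) parametrisation: support
# runs from ``loc`` to ``loc + scale`` with the mode at ``loc + c*scale``.
# A hypothetical helper mapping a (left, mode, right) triple onto those
# parameters:
def _triang_from_left_mode_right(left=2.0, mode=3.0, right=7.0):
    scale = right - left
    c = (mode - left) / scale
    # Density vanishes at the endpoints and peaks (2/scale) at the mode.
    return triang.pdf(np.array([left, mode, right]), c, loc=left, scale=scale)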
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
%(example)s
"""
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a,b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
if self.a > 0:
self._delta = -(self._sb - self._sa)
else:
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
if self.a > 0:
return _norm_isf(q*self._sb + self._sa*(1.0-q))
else:
return _norm_ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
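# --- Illustrative sketch (not part of the original module) ---
# Worked example of the clip-value conversion described in the notes
# above, with hypothetical numbers: truncate a Normal(mean=0.5, std=2.0)
# to the interval [-1, 3].
def _truncnorm_clip_example():
    my_mean, my_std = 0.5, 2.0
    myclip_a, myclip_b = -1.0, 3.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    r = truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=1000)
    # Every draw falls inside the clipping interval.
    return (r.min() >= myclip_a) and (r.max() <= myclip_b)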
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lambda continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = asarray(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
Px = 1.0/asarray(Px)
return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0) & (q == q), vals2, vals1)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
for ``-pi <= x <= pi``, ``kappa > 0``.
%(example)s
"""
def _rvs(self, kappa):
return mtrand.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
return exp(kappa * cos(x)) / (2*pi*special.i0(kappa))
def _cdf(self, x, kappa):
return vonmises_cython.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises')
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x, a) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x < pi
c2 = 1-c1
xp = extract(c1,x)
xn = extract(c2,x)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy')
# DISCRETE DISTRIBUTIONS
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute a relative entropy (also known as
Kullback-Leibler divergence or Kullback-Leibler distance)
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = special.xlogy(pk, pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk, nonzero(qk == 0.0), axis=0) != 0.0, 0):
return inf
vec = -special.xlogy(pk, pk / qk)
S = -sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
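# --- Illustrative sketch (not part of the original module) ---
# Small usage example for `entropy` above: the Shannon entropy of a fair
# coin in bits, and the relative entropy (KL divergence) between two
# Bernoulli distributions in nats.
def _entropy_usage_example():
    h_coin = entropy([0.5, 0.5], base=2)     # 1.0 bit
    kl = entropy([0.6, 0.4], qk=[0.5, 0.5])  # Kullback-Leibler divergence
    return h_coin, kl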
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0)
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q,10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b,*args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q,-10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a,*args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = list(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances from, for discrete random variables. rv_discrete can be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
Custom made discrete distribution:
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>> h = plt.plot(xk, custm.pmf(xk))
Random number generation:
>>> R = custm.rvs(size=100)
Display frozen pmf:
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdfsingle, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self._construct_argparser(names_to_inspect=['_drv_pmf'],
locscale_in='loc=0',
locscale_out='loc, 1') # scale=1 for discrete RVs
else:
self._construct_argparser(names_to_inspect=['_pmf', '_cdf'],
locscale_in='loc=0',
locscale_out='loc, 1') # scale=1 for discrete RVs
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
self.vec_generic_moment = vectorize(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
#discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace('\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k) == k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (default=1). Note that `size`
has to be given as keyword, not as positional argument.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as ``1 - cdf``,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : ndarray
Survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
        # output type 'd' to handle nan and inf
place(output,(q == 0)*(cond == cond), self.a-1)
place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
        # output type 'd' to handle nan and inf
place(output,(q == 0)*(cond == cond), self.b)
place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._isf(*goodargs) + loc) # PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
moments : string, optional
Composed of letters ['mvsk'] defining which moments to compute:
- 'm' = mean,
- 'v' = variance,
- 's' = (Fisher's) skew,
- 'k' = (Fisher's) kurtosis.
            The default is 'mv'.
Returns
-------
stats : sequence
of requested moments.
"""
try:
kwds["moments"] = kwds.pop("moment") # test suite is full of these; a feature?
except KeyError:
pass
args, loc, _, moments = self._parse_args_stats(*args, **kwds)
loc = asarray(loc)
args = tuple(map(asarray,args))
cond = self._argcheck(*args) & (loc == loc)
signature = inspect.getargspec(get_method_function(self._stats))
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1 * np.power(mu2, 1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n'th non-central moment of the distribution
Parameters
----------
n : int, n>=1
order of moment
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(get_method_function(self._stats))
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
ent = -special.xlogy(val, val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
term = -special.xlogy(val, val)
val = self.pmf(mu-k,*args)
term -= special.xlogy(val, val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
args, loc, _ = self._parse_args(*args, **kwds)
loc = asarray(loc)
args = list(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.5.1
if self.numargs == 0:
place(output, cond0, self._entropy())
else:
place(output, cond0, self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
fn : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers, optional
lower and upper bound for integration, default is set to the support
            of the distribution, lb and ub are inclusive (lb<=k<=ub)
conditional : bool, optional
Default is False.
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
            interval (k such that lb<=k<=ub).
Returns
-------
expect : float
Expected value.
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterium
for heavy tailed distribution e.g. zipf(4), accuracy for
mean, variance in example is only 1e-5,
increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
integers are evaluated)
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
# work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
# print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
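# Illustrative, hedged usage sketch for the rv_discrete machinery above: build a
# small finite distribution from (values, probabilities) and exercise pmf/cdf and
# the expect() summation loop.  Numbers are arbitrary and the lines are meant to
# be pasted into an interactive session rather than executed at import time.
#
#   die = rv_discrete(name='die', values=([1, 2, 3, 4, 5, 6], [1/6.] * 6))
#   die.pmf(3)                    # 1/6
#   die.cdf(4)                    # 4/6
#   die.expect()                  # mean, approximately 3.5
#   die.expect(lambda k: k**2)    # E[k**2], approximately 15.17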
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n,k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0,1,...,n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n,p,self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + special.xlogy(k,p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k,n,p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k,n,p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q,n,p))
vals1 = vals-1
temp = special.bdtr(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
q = 1.0-p
mu = n * p
var = n * p * q
g1 = (q-p) / sqrt(n*p*q)
g2 = (1.0-6*p*q)/(n*p*q)
return mu, var, g1, g2
def _entropy(self, n, p):
k = r_[0:n + 1]
vals = self._pmf(k, n, p)
h = -sum(special.xlogy(vals, vals), axis=0)
return h
binom = binom_gen(name='binom')
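# Illustrative, hedged check (arbitrary values) that binom.pmf agrees with the
# closed form quoted in the docstring; `comb` is already imported in this module.
#
#   n, p, k = 10, 0.3, 4
#   binom.pmf(k, n, p)                        # ~0.200121
#   comb(n, k) * p**k * (1 - p)**(n - k)      # same value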
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0,1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
h = -special.xlogy(p, p) - special.xlogy(1 - p, 1 - p)
return h
bernoulli = bernoulli_gen(b=1,name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + x*log(1-p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q,n,p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return (1-p)**(k-1) * p
def _logpmf(self, k, p):
return (k-1)*log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return (1.0-(1.0-p)**k)
def _sf(self, x, p):
k = floor(x)
return (1.0-p)**k
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = 1.0-(1.0-p)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
        g2 = np.polyval([1,-6,6],p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric")
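# Illustrative, hedged check of the geometric formulas above (arbitrary p, k):
#
#   p = 0.25
#   geom.pmf(3, p)                    # (1-p)**2 * p = 0.140625
#   geom.cdf(3, p)                    # 1 - (1-p)**3 = 0.578125
#   geom.sf(3, p) + geom.cdf(3, p)    # 1.0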
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
    for N - (M-n) <= k <= min(n, N)
Examples
--------
>>> from scipy.stats import hypergeom
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
        n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 +
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
h = -sum(special.xlogy(vals, vals), axis=0)
return h
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -p**k * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (1.0 / (p-1)**2 - 6*p / (p - 1)**3 +
6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
tmp = asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
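# Illustrative, hedged check (arbitrary mu, k) relating poisson.pmf to the
# docstring formula; `exp` is already imported in this module.
#
#   mu, k = 2.0, 3
#   poisson.pmf(k, mu)             # ~0.180447
#   exp(-mu) * mu**k / (1*2*3)     # same value (k! = 6)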
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,...,N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
fact = 1.0 / (high - low)
return fact
def _cdf(self, x, low, high):
k = floor(x)
return (k-low + 1) * 1.0 / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q*(high - low) + low) - 1
vals1 = (vals-1).clip(low, high)
temp = self._cdf(vals1, low, high)
return where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = asarray(high), asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
        g2 = -6.0/5.0*(d*d+1.0)/(d*d-1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return mtrand.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint',longname='A discrete uniform '
'(random integer)')
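# Illustrative, hedged check of the half-open support noted in the randint
# docstring: mass sits on low, ..., high - 1 only (arbitrary bounds).
#
#   randint.pmf([4, 5, 9, 10], 5, 10)    # [0.0, 0.2, 0.2, 0.0]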
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k) = 1/(zeta(a)*k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / asarray(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = special.errprint(0)
fac = asarray(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / asarray(np.power(var, 1.5))
mu4p = special.zeta(a-4.0,1)/fac
sv = special.errprint(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / asarray(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf')
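# Illustrative, hedged check (arbitrary a, k) that zipf.pmf follows the
# docstring formula 1/(zeta(a)*k**a); `special` is already imported here.
#
#   a, k = 3.0, 2
#   zipf.pmf(k, a)                        # ~0.10399
#   1.0 / (special.zeta(a, 1) * k**a)     # same value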
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a >0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
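# Illustrative, hedged sketch of the parameterisation described in the skellam
# docstring: shape parameters built from two (possibly correlated) Poisson rates.
# All numbers are arbitrary.
#
#   lam1, lam2, rho = 5.0, 3.0, 0.2
#   mu1 = lam1 - rho * np.sqrt(lam1 * lam2)
#   mu2 = lam2 - rho * np.sqrt(lam1 * lam2)
#   skellam.stats(mu1, mu2, moments='mv')   # mean = mu1 - mu2, variance = mu1 + mu2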
| apache-2.0 |
voidabhi/python-scripts | pandas_snippets.py | 1 | 5158 | # List unique values in a DataFrame column
# h/t @makmanalp for the updated syntax!
df['Column Name'].unique()
# Convert Series datatype to numeric (will error if column has non-numeric values)
# h/t @makmanalp
pd.to_numeric(df['Column Name'])
# Convert Series datatype to numeric, changing non-numeric values to NaN
# h/t @makmanalp for the updated syntax!
pd.to_numeric(df['Column Name'], errors='coerce')
# Grab DataFrame rows where column has certain values
valuelist = ['value1', 'value2', 'value3']
df = df[df.column.isin(valuelist)]
# Grab DataFrame rows where column doesn't have certain values
valuelist = ['value1', 'value2', 'value3']
df = df[~df.column.isin(valuelist)]
# Delete column from DataFrame
del df['column']
# Select from DataFrame using criteria from multiple columns
# (use `|` instead of `&` to do an OR)
newdf = df[(df['column_one']>2004) & (df['column_two']==9)]
# Rename several DataFrame columns
df = df.rename(columns = {
'col1 old name':'col1 new name',
'col2 old name':'col2 new name',
'col3 old name':'col3 new name',
})
# Lower-case all DataFrame column names
df.columns = map(str.lower, df.columns)
# Even more fancy DataFrame column re-naming
# lower-case all DataFrame column names (for example)
df.rename(columns=lambda x: x.split('.')[-1], inplace=True)
# Loop through rows in a DataFrame
# (if you must)
for index, row in df.iterrows():
    print(index, row['some column'])
# Much faster way to loop through DataFrame rows
# if you can work with tuples
# (h/t hughamacmullaniv)
for row in df.itertuples():
print(row)
# Next few examples show how to work with text data in Pandas.
# Full list of .str functions: http://pandas.pydata.org/pandas-docs/stable/text.html
# Slice values in a DataFrame column (aka Series)
df.column.str[0:2]
# Lower-case everything in a DataFrame column
df.column_name = df.column_name.str.lower()
# Get length of data in a DataFrame column
df.column_name.str.len()
# Sort dataframe by multiple columns
df = df.sort(['col1','col2','col3'],ascending=[1,1,0])
# Get top n for each group of columns in a sorted dataframe
# (make sure dataframe is sorted first)
top5 = df.groupby(['groupingcol1', 'groupingcol2']).head(5)
# Grab DataFrame rows where specific column is null/notnull
newdf = df[df['column'].isnull()]
# Select from DataFrame using multiple keys of a hierarchical index
df.xs(('index level 1 value','index level 2 value'), level=('level 1','level 2'))
# Change all NaNs to None (useful before
# loading to a db)
df = df.where((pd.notnull(df)), None)
# More pre-db insert cleanup...make a pass through the dataframe, stripping whitespace
# from strings and changing any empty values to None
# (not especially recommended but including here b/c I had to do this in real life one time)
df = df.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)
# Get quick count of rows in a DataFrame
len(df.index)
# Pivot data (with flexibility about what
# becomes a column and what stays a row).
# Syntax works on Pandas >= .14
pd.pivot_table(
df,values='cell_value',
index=['col1', 'col2', 'col3'], #these stay as columns; will fail silently if any of these cols have null values
columns=['col4']) #data values in this column become their own column
# Change data type of DataFrame column
df.column_name = df.column_name.astype(np.int64)
# Get rid of non-numeric values throughout a DataFrame:
for col in refunds.columns.values:
    refunds[col] = refunds[col].replace('[^0-9.-]+', '', regex=True)
# Set DataFrame column values based on other column values (h/t: @mlevkov)
df.loc[(df['column1'] == some_value) & (df['column2'] == some_other_value), ['column_to_change']] = new_value
# Clean up missing values in multiple DataFrame columns
df = df.fillna({
'col1': 'missing',
'col2': '99.999',
'col3': '999',
'col4': 'missing',
'col5': 'missing',
'col6': '99'
})
# Concatenate two DataFrame columns into a new, single column
# (useful when dealing with composite keys, for example)
# (h/t @makmanalp for improving this one!)
df['newcol'] = df['col1'].astype(str) + df['col2'].astype(str)
# Doing calculations with DataFrame columns that have missing values
# In example below, swap in 0 for df['col1'] cells that contain null
df['new_col'] = np.where(pd.isnull(df['col1']),0,df['col1']) + df['col2']
# Split delimited values in a DataFrame column into two new columns
df['new_col1'], df['new_col2'] = zip(*df['original_col'].apply(lambda x: x.split(': ', 1)))
# Collapse hierarchical column indexes
df.columns = df.columns.get_level_values(0)
# Convert Django queryset to DataFrame
qs = DjangoModelName.objects.all()
q = qs.values()
df = pd.DataFrame.from_records(q)
# Create a DataFrame from a Python dictionary
df = pd.DataFrame(list(a_dictionary.items()), columns = ['column1', 'column2'])
# Get a report of all duplicate records in a dataframe, based on specific columns
dupes = df[df.duplicated(['col1', 'col2', 'col3'], keep=False)]
# Set up formatting so larger numbers aren't displayed in scientific notation (h/t @thecapacity)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
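# Hedged, self-contained mini-demo tying a few of the snippets above together;
# the DataFrame, column names and values below are made up purely for illustration.
import pandas as pd
import numpy as np
demo = pd.DataFrame({'col1': ['a', 'b', 'c', 'a'], 'col2': [1.0, 2.0, 3.0, np.nan]})
demo = demo[demo['col1'].isin(['a', 'c'])]                            # keep rows with certain values
demo['col2'] = demo['col2'].fillna(99.999)                            # clean up missing values
demo['newcol'] = demo['col1'].astype(str) + demo['col2'].astype(str)  # composite key column
print(demo)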
| mit |
musically-ut/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_new.py | 34 | 3845 | # -*- coding: utf-8 -*-
"""Example for GAM with Poisson Model and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
np.seterr(all='raise')
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction or end in overflow
#DGP: simple polynomial
order = 3
sigma_noise = 0.1
nobs = 1000
#lb, ub = -0.75, 3#1.5#0.75 #2.5
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*1, 1.*x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 3
if example == 2:
print("binomial")
f = family.Binomial()
mu_true = f.link.inverse(z)
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
f = family.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
#p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p = np.asarray([stats.poisson.rvs(p) for p in f.link.inverse(z)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
for ss in m.smoothers:
print(ss.params)
if example > 1:
import matplotlib.pyplot as plt
plt.figure()
for i in np.array(m.history[2:15:3]): plt.plot(i.T)
plt.figure()
plt.plot(exog)
#plt.plot(p, '.', lw=2)
plt.plot(y_true, lw=2)
y_pred = m.results.mu # + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], 'k.', alpha=0.5)
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson ' + ii)
counter += 1
res = GLM(p, exog_reduced, family=f).fit()
#plot component, compared to true component
x1 = x[:,0]
x2 = x[:,1]
f1 = exog[:,:order+1].sum(1) - 1 #take out constant
f2 = exog[:,order+1:].sum(1) - 1
plt.figure()
#Note: need to correct for constant which is indeterminately distributed
#plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0]+1, 'r')
#better would be subtract f(0) m.smoothers[0](np.array([0]))
plt.plot(x1, f1, linewidth=2)
plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0], 'r')
plt.figure()
plt.plot(x2, f2, linewidth=2)
plt.plot(x2, m.smoothers[1](x2)-m.smoothers[1].params[0], 'r')
plt.show() | bsd-3-clause |
FernanOrtega/DAT210x | Module6/assignment5.py | 1 | 2547 | import pandas as pd
#https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.names
#
# TODO: Load up the mushroom dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled.
# Header information is on the dataset's website at the UCI ML Repo
# Check NA Encoding
#
X = pd.read_csv('Datasets/agaricus-lepiota.data', na_values='?', names=['class','cap-shape',
'cap-surface', 'cap-color', 'bruises', 'odor', 'gill-attachment', 'gill-spacing',
'gill-size', 'gill-color', 'stalk-shape', 'stalk-root', 'stalk-surface-aring',
'stalk-surface-bring', 'stalk-color-aring', 'stalk-color-bring', 'veil-type',
'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population',
'habitat'])
# INFO: An easy way to show which rows have nans in them
print X[pd.isnull(X).any(axis=1)]
#
# TODO: Go ahead and drop any row with a nan
#
X = X.dropna(axis=0)  # drop rows containing NaN (the '?' entries read in as NaN)
print X.shape
#
# TODO: Copy the labels out of the dset into variable 'y' then Remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- canadian:0, kama:1, and rosa:2
#
y = X['class']
X = X.drop('class', axis=1)
#
# TODO: Encode the entire dataset using dummies
#
X = pd.get_dummies(X)
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
#
# TODO: Create an DT classifier. No need to set any parameters
#
from sklearn import tree
dtc = tree.DecisionTreeClassifier()
#
# TODO: train the classifier on the training data / labels:
# TODO: score the classifier on the testing data / labels:
#
dtc.fit(X_train, y_train)
score = dtc.score(X_test, y_test)
print "High-Dimensionality Score: ", round((score*100), 3)
#
# TODO: Use the code on the course's SciKit-Learn page to output a .DOT file
# Then render the .DOT to .PNGs. Ensure you have graphviz installed.
# If not, `brew install graphviz`. If you can't, use: http://webgraphviz.com/.
# On Windows 10, graphviz installs via a msi installer that you can download from
# the graphviz website. Also, a graph editor, gvedit.exe can be used to view the
# tree directly from the exported tree.dot file without having to issue a call.
#
tree.export_graphviz(dtc.tree_, out_file='tree.dot', feature_names=X.columns)
from subprocess import call
call(['dot', '-T', 'png', 'tree.dot', '-o', 'tree.png'])
| mit |
hstau/covar-cryo | covariance/postProc/mrc2svd.py | 1 | 3242 | import os,sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from pylab import plot, loadtxt, imshow, show, xlabel, ylabel
import scipy.misc
#first, source 'ManifoldEM' conda environment;
#then run bash script (which will access this one) via: 'sh mrc2svd.sh'
Topo_list = [0, 1] #list of eigenvectors to retain; [0,1] for first 2... [0,1,2] for first 3, etc.
################################################################################
# SINGULAR VALUE DECOMPOSITION #
################################################################################
def op(svd_dir, proj_name, user_dir):
p.init()
p.proj_name = proj_name
p.user_dir = user_dir
set_params.op(1)
outputsDir = os.path.join(p.user_dir, 'outputs_%s' % proj_name)
b = np.zeros((p.nPix**3,p.nClass),dtype=np.float32)
for bin in range(p.nClass):
rec_file = os.path.join(outputsDir, 'post/1_vol/EulerAngles_{}_{}_of_{}.mrc'.format(p.trajName, bin + 1, p.nClass))
with mrcfile.open(rec_file) as mrc:
vol = mrc.data
b[:,bin] = vol.flatten()
topoNum = 8 #number of topos considered
print('Performing SVD...')
U, S, V = svdRF.op(b)
sdiag = np.diag(S)
plt.plot(sdiag**2) #S is square roots of non-zero eigenvalues, thus square diagonal of S
plt.scatter(range(0,50),sdiag**2)
plt.title('Eigenvalue Spectrum')
plt.xlabel(r'$\mathrm{\Psi}$')
plt.ylabel(r'$\mathrm{\lambda}$', rotation=0)
#plt.show()
plt.savefig(svd_dir + '.png', bbox_inches='tight')
i1 = 0
Npixel = p.nPix**3
ConOrder = 1
Topo_mean = np.ones((topoNum,Npixel)) * np.Inf
for ii in range(topoNum):
# s = s + 1 needed?
Topo = np.ones((Npixel, ConOrder)) * np.Inf
for k in range(ConOrder):
Topo[:, k] = U[k * Npixel: (k + 1) * Npixel, ii]
Topo_mean[ii,:] = np.mean(Topo, axis=1)
Topo_mean = Topo_mean.reshape((topoNum,p.nPix,p.nPix,p.nPix))
Topo_mean = Topo_mean.astype(np.float32)
ConImgT = np.zeros((max(U.shape), p.nClass), dtype='float64')
for i in Topo_list:
# %ConImgT = U(:,i) *(sdiag(i)* V(:,i)')*psiC';
ConImgT = ConImgT + np.matmul(U[:, i].reshape(-1, 1), sdiag[i] * (V[:, i].reshape(1, -1)))
ConImgT=ConImgT.T.astype(np.float32)
ConImgT=ConImgT.reshape((p.nClass,p.nPix,p.nPix,p.nPix))
#ConImgT = ConImgT.T
#ConImgT = ConImgT.reshape((p.nClass,p.nPix,p.nPix,p.nPix))
for bin in range(p.nClass):
rec1_file = os.path.join(outputsDir, 'post/2_svd/SVDimgsRELION_{}_{}_of_{}.mrc'.format(p.trajName, bin + 1, p.nClass))
mrc = mrcfile.new(rec1_file)
mrc.set_data(ConImgT[bin,:,:,:])
for t in range(topoNum):
toporec_file = os.path.join(outputsDir, 'post/2_svd/SVDTOPOimgsRELION_{}_{}_of_{}.mrc'.format(p.trajName, t + 1, p.nClass))
mrc = mrcfile.new(toporec_file)
mrc.set_data(Topo_mean[t,:,:,:])
if __name__ == '__main__':
mainDir = sys.argv[2]
modDir = os.path.join(mainDir, 'modules')
sys.path.append(modDir)
import p
import mrcfile
import svdRF
import set_params
op(os.path.splitext(sys.argv[0])[0], sys.argv[1], sys.argv[2]) #enter the params file name
| gpl-2.0 |
depet/scikit-learn | sklearn/preprocessing/tests/test_data.py | 5 | 25313 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import add_dummy_feature
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
"""Check min max scaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(
X_csr_scaled.astype(np.float))
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
"""Check warning when scaling integer data"""
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_warns(UserWarning, StandardScaler().fit, X)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_warns(UserWarning, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
        # recompute the row sums for this transform instead of reusing the
        # stale values left over from the previous loop
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder():
"""Test OneHotEncoder's fit and transform."""
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
    # test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
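# Illustrative sketch (added commentary, not part of the original test file):
# it spells out how the fitted attributes checked above relate to the shape of
# the one-hot output for the small X used at the start of the test.
def _one_hot_layout_sketch():
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    Xt = enc.fit_transform(X).toarray()
    # feature_indices_ gives the column block reserved for each input column:
    # [0, 4), [4, 7) and [7, 9), i.e. 4 + 3 + 2 = 9 possible indicator columns
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
    # only values actually seen during fit are kept (active_features_), which
    # is why the transformed array has 5 columns instead of 9
    assert_equal(len(enc.active_features_), Xt.shape[1])
    assert_equal(Xt.shape, (2, 5))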
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
| bsd-3-clause |
MechCoder/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 39 | 5425 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. These criteria
are derived for large samples (asymptotic results) and assume the
model is correct, i.e. that the data are actually generated by this
model. They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold cross-validation with 2 algorithms to
compute the Lasso path: coordinate descent, as implemented by the LassoCV
class, and Lars (least angle regression) as implemented by the LassoLarsCV
class. Both algorithms give roughly the same results. They differ with
regard to their execution speed and sources of numerical errors.
Lars computes a path solution only at each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be useful if the number of features is really large
and there are enough samples for a large number of them to be selected. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
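# Optional summary printout (not part of the original example): show the alpha
# selected by each strategy side by side, which makes the differences discussed
# in the module docstring easy to compare.  ``model`` here is the LassoLarsCV
# fit from just above.
print("alpha selected by AIC:         %f" % alpha_aic_)
print("alpha selected by BIC:         %f" % alpha_bic_)
print("alpha selected by LassoLarsCV: %f" % model.alpha_)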
plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
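# Illustrative sketch (added commentary, not part of the original test module):
# the custom ``multioutput`` weights used above behave like a weighted average
# of the per-output ("raw_values") scores, e.g. 0.4 * 0.125 + 0.6 * 0.5625
# gives the 0.39 checked for the mean squared error.
def _custom_weights_are_weighted_averages():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    raw = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    weighted = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    assert_almost_equal(np.dot(raw, [0.4, 0.6]), weighted)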
| gpl-2.0 |
shikhardb/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
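# Optional check (not part of the original example): inspect the labels from
# the last dataset to confirm that the gaussian-quantiles call above really
# produced three classes of (roughly) equal size.
import numpy as np
print("classes in last dataset: %s" % np.unique(Y1))
print("samples per class:       %s" % np.bincount(Y1))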
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` module includes utilities to fine-tune the
parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
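# Illustrative usage sketch (added commentary, not part of the original
# module): evaluating a single parameter setting on one train/test split with
# ``fit_grid_point``.  The estimator, split and scorer below are arbitrary
# choices made only for this example.
def _fit_grid_point_example():
    from sklearn.svm import SVC
    from sklearn.datasets import load_iris
    from sklearn.metrics.scorer import check_scoring
    iris = load_iris()
    rng = np.random.RandomState(0)
    indices = rng.permutation(len(iris.target))
    train, test = indices[:100], indices[100:]
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    score, parameters, n_test = fit_grid_point(
        iris.data, iris.target, estimator, {'C': 1.0}, train, test,
        scorer, verbose=0)
    return score, parameters, n_test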
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would also reintroduce the __dict__ on the instance; we avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
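# Illustrative usage sketch appended for documentation purposes (not part of
# the original module): RandomizedSearchCV mirrors the GridSearchCV example in
# the docstring above, but samples a fixed number of candidate settings.  The
# chosen distribution and n_iter are arbitrary assumptions for this sketch.
def _randomized_search_example():
    from scipy.stats import expon
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    param_distributions = {'kernel': ['linear', 'rbf'], 'C': expon(scale=10)}
    search = RandomizedSearchCV(svm.SVC(), param_distributions, n_iter=8,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_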
| bsd-3-clause |
morgenst/PyAnalysisTools | PyAnalysisTools/AnalysisTools/CutFlowAnalyser.py | 1 | 42259 | from __future__ import print_function
import operator
import os
import re
import numpy as np
import pandas as pd
from builtins import input
from builtins import map
from builtins import object
from builtins import range
import PyAnalysisTools.PlottingUtils.Formatting as Ft
import PyAnalysisTools.PlottingUtils.PlottingTools as Pt
from PyAnalysisTools.base.Modules import load_modules
from PyAnalysisTools.base.ProcessConfig import Process, find_process_config, parse_and_build_process_config
from collections import defaultdict, OrderedDict
from PyAnalysisTools.base import _logger, InvalidInputError
from PyAnalysisTools.base.FileHandle import FileHandle as FH, filter_empty_trees
from PyAnalysisTools.PlottingUtils.HistTools import scale
from PyAnalysisTools.AnalysisTools.XSHandle import XSHandle, get_xsec_weight
from PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig, \
parse_and_build_plot_config, get_default_color_scheme
from PyAnalysisTools.base.OutputHandle import OutputFileHandle
from PyAnalysisTools.PlottingUtils.Plotter import Plotter as pl
from numpy.lib.recfunctions import rec_append_fields
from PyAnalysisTools.AnalysisTools.RegionBuilder import RegionBuilder
from PyAnalysisTools.base.YAMLHandle import YAMLLoader
from PyAnalysisTools.AnalysisTools.MLHelper import Root2NumpyConverter
from PyAnalysisTools.AnalysisTools.StatisticsTools import get_signal_acceptance
from PyAnalysisTools.PlottingUtils import set_batch_mode
try:
from tabulate.tabulate import tabulate
except ImportError:
from tabulate import tabulate
tabulate.LATEX_ESCAPE_RULES = {}
class CommonCutFlowAnalyser(object):
def __init__(self, **kwargs):
kwargs.setdefault('lumi', None)
kwargs.setdefault('process_configs', None)
kwargs.setdefault('disable_sm_total', False)
kwargs.setdefault('plot_config_file', None)
kwargs.setdefault('config_file', None)
kwargs.setdefault('module_config_files', None)
kwargs.setdefault('disable_interactive', False)
kwargs.setdefault('save_table', False)
kwargs.setdefault('batch', True)
kwargs.setdefault('friend_directory', None)
kwargs.setdefault('friend_tree_names', None)
kwargs.setdefault('friend_file_pattern', None)
kwargs.setdefault('precision', 3)
kwargs.setdefault('output_tag', None)
kwargs.setdefault('disable_cutflow_reading', False)
self.event_numbers = dict()
self.lumi = kwargs['lumi']
self.interactive = not kwargs['disable_interactive']
self.output_tag = kwargs['output_tag']
self.save_table = kwargs['save_table']
if self.save_table:
self.interactive = False
if kwargs['output_dir'] is None:
_logger.error('No output directory provided but requesting to store tables. Using current dir')
kwargs['output_dir'] = '.'
self.disable_sm_total = kwargs['disable_sm_total']
if 'dataset_config' in kwargs:
_logger.error('The property "dataset_config" is not supported anymore. Please use xs_config_file')
kwargs.setdefault('xs_config_file', kwargs['dataset_config'])
self.xs_handle = XSHandle(kwargs["xs_config_file"])
self.file_handles = [FH(file_name=fn, dataset_info=kwargs['xs_config_file'],
friend_directory=kwargs['friend_directory'],
friend_tree_names=kwargs['friend_tree_names'],
friend_pattern=kwargs['friend_file_pattern']) for fn in set(kwargs['file_list'])]
self.process_configs = None
if kwargs['process_config_files'] is not None:
self.process_configs = parse_and_build_process_config(kwargs['process_config_files'])
# self.dtype = [('cut', 'S300'), ('yield', 'f4'), ('yield_unc', 'f4'), ('eff', float), ('eff_total', float)]
self.dtype = [('cut', 'S300'), ('yield', 'f4')]
self.output_handle = None
self.plot_config = None
self.config = None
if kwargs['plot_config_file'] is not None:
self.plot_config = parse_and_build_plot_config(kwargs['plot_config_file'])
if kwargs['output_dir'] is not None:
self.output_handle = OutputFileHandle(output_dir=kwargs['output_dir'])
if kwargs['config_file'] is not None:
self.config = YAMLLoader.read_yaml(kwargs['config_file'])
if 'Lumi' in self.config:
self.lumi = self.config['Lumi']
if self.process_configs is not None:
self.file_handles = pl.filter_unavailable_processes(self.file_handles, self.process_configs)
self.modules = []
if kwargs['module_config_files'] is not None:
modules = load_modules(kwargs['module_config_files'], self)
self.modules = [m for m in modules]
self.disable_cutflow_reading = kwargs['disable_cutflow_reading']
if not self.disable_cutflow_reading:
list(map(self.load_dxaod_cutflows, self.file_handles))
set_batch_mode(kwargs['batch'])
def __del__(self):
if self.output_handle is not None:
self.output_handle.write_and_close()
def load_dxaod_cutflows(self, file_handle):
process = file_handle.process
if process is None:
_logger.error("Parsed NoneType process from {:s}".format(file_handle.file_name))
return
if process not in self.event_numbers:
self.event_numbers[process] = file_handle.get_number_of_total_events()
else:
self.event_numbers[process] += file_handle.get_number_of_total_events()
def get_cross_section_weight(self, process):
"""
Calculates weight according to process cross section and luminosity. If MC samples are split in several
production campaigns and the luminosity information is provided as a dictionary with the campaign name as key
and luminosity as value each campaign will be scaled to this luminosity and processes will be added up later
:param process: process information
:type process: Process
:return: luminosity weight
:rtype: float
"""
cross_section_weight = get_xsec_weight(self.lumi, process, self.xs_handle, self.event_numbers)
return cross_section_weight
@staticmethod
def format_yield(value, uncertainty=None):
if value > 10000.:
yld_string = '{:.3e}'.format(value)
if uncertainty is not None:
yld_string += ' +- {:.3e}'.format(uncertainty)
else:
yld_string = '{:.2f}'.format(value)
return yld_string
def stringify(self, cutflow):
def format_yield(value, uncertainty=None):
if value > 10000.:
return "{:.2e}".format(value)
else:
return "{:.2f} ".format(value)
name = 'yield'
if self.raw:
name = 'yield_raw'
cutflow = np.array([(cutflow[i]["cut"],
format_yield(cutflow[i][name])) for i in range(len(cutflow))],
dtype=[("cut", "S100"), (name, "S100")])
return cutflow
def print_cutflow_table(self):
available_cutflows = list(self.cutflow_tables.keys())
if self.interactive:
print("######## Selection menu ########")
print("Available cutflows for printing1: ")
print("--------------------------------")
for i, region in enumerate(available_cutflows):
print(i, ")", region)
print("a) all")
user_input = input(
"Please enter your selection (space or comma separated). Hit enter to select default (BaseSelection) ")
if user_input == "":
selections = ["BaseSelection"]
elif user_input.lower() == "a":
selections = available_cutflows
elif "," in user_input:
selections = [available_cutflows[i] for i in map(int, user_input.split(","))]
elif "," not in user_input:
selections = [available_cutflows[i] for i in map(int, user_input.split())]
else:
print("{:s}Invalid input {:s}. Going for default.\033[0m".format("\033[91m", user_input))
selections = ["BaseSelection"]
else:
selections = available_cutflows
for selection, cutflow in list(self.cutflow_tables.items()):
if selection not in selections:
continue
out_file = None
if self.save_table:
file_extension = 'txt'
if self.format == 'latex':
file_extension = 'tex'
if self.output_tag:
out_file = open(os.path.join(self.output_handle.output_dir,
'cutflow_{:s}_{:s}.{:s}'.format(self.output_tag, selection,
file_extension)), 'wb')
else:
out_file = open(os.path.join(self.output_handle.output_dir,
'cutflow_{:s}.{:s}'.format(selection, file_extension)), 'wb')
print()
print("Cutflow for region {:s}".format(selection))
print(cutflow, file=out_file)
if out_file is not None:
_logger.info('Stored cutflow in {:s}'.format(out_file.name))
def make_cutflow_tables(self):
for systematic in self.systematics:
self.make_cutflow_table(systematic)
class ExtendedCutFlowAnalyser(CommonCutFlowAnalyser):
"""
Extended cutflow analyser building additional cuts not stored in cutflow hist
"""
def __init__(self, **kwargs):
kwargs.setdefault("output_file_name", None)
kwargs.setdefault("no_merge", False)
kwargs.setdefault("raw", False)
kwargs.setdefault("output_dir", None)
kwargs.setdefault("format", "plain")
kwargs.setdefault('enable_eff', False)
kwargs.setdefault('percent_eff', False)
kwargs.setdefault('enable_signal_plots', False)
kwargs.setdefault('friend_tree_names', None)
kwargs.setdefault('tree_dir_name', 'Nominal')
super(ExtendedCutFlowAnalyser, self).__init__(**kwargs)
for k, v in list(kwargs.items()):
if not hasattr(self, k):
setattr(self, k, v)
if 'alternative_tree_name' not in kwargs:
self.alternative_tree_name = self.tree_name
self.file_handles = filter_empty_trees(self.file_handles, self.tree_name, self.alternative_tree_name,
self.tree_dir_name)
self.event_yields = {}
self.selection = RegionBuilder(**YAMLLoader.read_yaml(kwargs["selection_config"])["RegionBuilder"])
self.converter = Root2NumpyConverter(["weight"])
self.cutflow_tables = {}
self.cutflows = {}
_logger.debug("Setup extended CutFlowAnalyser")
if kwargs["output_dir"] is not None:
self.output_handle = OutputFileHandle(output_dir=kwargs["output_dir"])
for k, v in list(kwargs.items()):
if not hasattr(self, k):
setattr(self, k, v)
if kwargs['friend_tree_names'] is not None:
list([fh.reset_friends() for fh in self.file_handles])
list([fh.link_friend_trees(self.tree_name, 'Nominal') for fh in self.file_handles])
self.region_selections = {}
if self.plot_config is None:
self.plot_config = PlotConfig(name="acceptance_all_cuts", color=get_default_color_scheme(),
# labels=[data[0] for data in acceptance_hists],
xtitle="LQ mass [GeV]", ytitle="acceptance [%]", draw="Marker",
lumi=self.lumi, watermark="Internal", ymin=0., ymax=100.)
def read_event_yields(self, systematic="Nominal"):
_logger.info("Read event yields in directory {:s}".format(systematic))
if systematic not in self.cutflows:
self.cutflows[systematic] = {}
for region in self.selection.regions:
self.region_selections[region] = region.get_cut_list()
if region.name not in self.cutflows[systematic]:
self.cutflows[systematic][region.name] = {}
for file_handle in self.file_handles:
process = file_handle.process
process_config = find_process_config(process, process_configs=self.process_configs)
tree = file_handle.get_object_by_name(self.tree_name, systematic)
yields = []
cut_list = region.get_cut_list(file_handle.process.is_data)
cut_string = ""
for i, cut in enumerate(cut_list):
if cut.process_type is not None:
if process_config.type.lower() in cut.process_type:
current_cut = cut.selection
else:
current_cut = "1"
else:
current_cut = cut.selection
cut_string = '&&'.join([cut_string, current_cut]).lstrip('&&')
cut_string = cut_string.replace(' ', '')
if not self.raw:
yields.append([cut.name,
self.converter.convert_to_array(tree, cut_string)['weight'].flatten().sum()])
# 0, -1., -1.))
else:
yields.append([cut.name,
len([y for y in
self.converter.convert_to_array(tree, cut_string)['weight'].flatten() if
y != 0.])])
# 0, -1., -1.))
if process not in self.cutflows[systematic][region.name]:
self.cutflows[systematic][region.name][process] = yields
else:
for icut, y in enumerate(yields):
self.cutflows[systematic][region.name][process][icut][1] += y[1]
name = 'yield'
if self.raw:
name = 'yield_raw'
for process in list(self.cutflows[systematic][region.name].keys()):
list(map(tuple, self.cutflows[systematic][region.name][process]))
self.cutflows[systematic][region.name][process] = list(
map(tuple, self.cutflows[systematic][region.name][process]))
self.cutflows[systematic][region.name][process] = np.array(
self.cutflows[systematic][region.name][process],
dtype=[('cut', 'S300'), (name, 'f4')])
def apply_cross_section_weight(self, systematic, region):
for process in list(self.cutflows[systematic][region].keys()):
if process.is_data:
continue
try:
lumi_weight = self.get_cross_section_weight(process)
except InvalidInputError:
_logger.error("None type parsed for ", process)
continue
self.cutflows[systematic][region][process]['yield'] *= lumi_weight
def make_cutflow_table(self, systematic):
cutflow_tables = dict()
def keep_process(process, signals):
if process == "SMTotal":
return True
prcf = find_process_config(process, self.process_configs)
if prcf is None:
_logger.error("Could not find process config for {:s}. This is not expected. Removing process. "
"Please investigate".format(process))
return False
if prcf.type.lower() != "signal":
return True
if prcf in signals:
return True
return False
for region in list(self.cutflows[systematic].keys()):
process_configs = [(process,
find_process_config(process,
self.process_configs)) for process in
list(self.cutflows[systematic][region].keys())]
if self.process_configs is None:
process_configs = []
if len([pc for pc in process_configs if pc[0] == "SMTotal" or pc[1].type.lower() == "signal"]) > 3:
signals = [pc for pc in process_configs if pc[0] == "SMTotal" or pc[1].type.lower() == "signal"]
try:
signals.sort(key=lambda i: int(re.findall(r'\d{2,4}', i[0])[0]))
except IndexError:
_logger.error("Problem sorting signals")
if self.config is not None:
if 'ordering' in self.config:
for sig in signals:
sig_name = sig[0]
if sig_name in self.config['ordering']:
continue
self.config['ordering'].append(sig_name)
else:
self.config['ordering'] = [s[0] for s in signals]
else:
self.config = OrderedDict({'ordering': [s[0] for s in signals]})
choices = None
if self.interactive:
for i, process in enumerate(signals):
print("{:d}, {:s}".format(i, process[0]))
print("a) All")
choice = input("What processes do you like to have shown (comma/space seperated)?")
try:
if choice.lower() == "a":
choices = None
elif "," in choice:
choices = list(map(int, choice.split(",")))
else:
                            choices = list(map(int, choice.split()))
except ValueError:
pass
if choices is not None:
signals = [process[1] for process in signals if signals.index(process) in choices]
self.cutflows[systematic][region] = OrderedDict(
[kv for kv in iter(list(self.cutflows[systematic][region].items())) if
keep_process(kv[0], signals)])
for process, cutflow in list(self.cutflows[systematic][region].items()):
cutflow_tmp = self.stringify(cutflow)
if region not in list(cutflow_tables.keys()):
cutflow_tables[region] = pd.DataFrame(cutflow_tmp, dtype=str)
if self.enable_eff:
cutflow_tables[region] = self.calculate_cut_efficiencies(cutflow_tables[region])
cutflow_tables[region].columns = ["cut", process, 'eff_{:s}'.format(process)]
else:
cutflow_tables[region].columns = ["cut", process]
continue
if not self.raw:
d = {process: cutflow_tmp['yield']}
else:
d = {process: cutflow_tmp['yield_raw']}
cutflow_tables[region] = cutflow_tables[region].assign(**d)
if self.enable_eff:
cutflow_tables[region] = self.calculate_cut_efficiencies(cutflow_tables[region], cutflow_tmp,
process)
self.cutflow_tables = {}
ordering = None
if self.config is not None and 'ordering' in self.config:
ordering = self.config['ordering']
for k, v in list(cutflow_tables.items()):
if ordering is not None:
processes = list(v.keys())
if 'cut' not in ordering:
ordering.insert(0, 'cut')
ordering = [p for p in ordering if p in processes]
ordering += [p for p in processes if p not in ordering]
v = v[ordering]
fct = 'to_csv'
default_args = {'sep': ','}
if self.format == 'plain':
fct = 'to_string'
default_args = {}
if self.format == 'latex':
fct = 'to_latex'
default_args = {'index': False, 'escape': False}
self.cutflow_tables[k] = getattr(v, fct)(**default_args)
def calculate_sm_total(self):
def add(yields):
sm_yield = []
for process, evn_yields in list(yields.items()):
if 'data' in process.lower():
continue
if find_process_config(process, self.process_configs).type.lower() == "signal":
continue
if len(sm_yield) == 0:
sm_yield = list(evn_yields)
continue
for icut, cut_item in enumerate(evn_yields):
sm_yield[icut] = tuple([sm_yield[icut][0]] + list(map(operator.add,
list(sm_yield[icut])[1:],
list(evn_yields[icut])[1:])))
return np.array(sm_yield, dtype=self.dtype)
for systematics, regions_data in list(self.cutflows.items()):
for region, yields in list(regions_data.items()):
self.cutflows[systematics][region]['SMTotal'] = add(yields)
def merge(self, yields):
"""
Merge event yields for subprocesses
:param yields: pair of process and yields
:type yields: dict
:return: merged yields
:rtype: dict
"""
if self.process_configs is None:
return yields
for process in list(yields.keys()):
parent_process = find_process_config(process, self.process_configs).name
if parent_process is None:
continue
if parent_process not in list(yields.keys()):
yields[parent_process] = yields[process]
else:
try:
if not self.raw:
yields[parent_process]["yield"] += yields[process]["yield"]
else:
yields[parent_process]["yield"] += yields[process]["yield_raw"]
except TypeError:
yields[parent_process] += yields[process]
yields.pop(process)
return yields
def merge_yields(self):
for systematics, regions_data in list(self.cutflows.items()):
for region, yields in list(regions_data.items()):
self.cutflows[systematics][region] = self.merge(yields)
def update_top_background(self, module):
# def calc_inclusive():
# # stitch = module.get_stitch_point(region)
for region in list(self.cutflows['Nominal'].keys()):
# inclusive = None
stitch = module.get_stitch_point(region)
for cut, yld in self.cutflows['Nominal'][region]['ttbar']:
# yld = 10000.
if 'mLQmax' not in cut:
continue
mass = float(re.findall(r'\d+', cut)[0])
if mass < stitch:
continue
# yld = module.get_extrapolated_bin_content(region, mass, lumi=140.)
def plot_signal_yields(self):
"""
Make plots of signal yields after each cut summarised per signal sample
:return: nothing
:rtype: None
"""
if self.output_handle is None:
_logger.error("Request to plot signal yields, but output handle not initialised. Please provide output"
"directory.")
return
replace_items = [('/', ''), (' ', ''), ('>', '_gt_'), ('<', '_lt_'), ('$', ''), ('.', '')]
signal_processes = [prc for prc in list(self.process_configs.values()) if prc.type.lower() == "signal"]
signal_generated_events = self.merge(self.event_numbers)
signal_generated_events = dict([cf for cf in iter(list(signal_generated_events.items())) if
cf[0] in [prc.name for prc in signal_processes]])
for region, cutflows in list(self.cutflows["Nominal"].items()):
signal_yields = dict(
[cf for cf in iter(list(cutflows.items())) if cf[0] in [prc.name for prc in signal_processes]])
if len(signal_yields) == 0:
continue
canvas_cuts, canvas_cuts_log, canvas_final = get_signal_acceptance(signal_yields, signal_generated_events,
self.plot_config)
new_name_cuts = canvas_cuts.GetName()
new_name_cuts_log = canvas_cuts_log.GetName()
new_name_final = canvas_final.GetName()
for item in replace_items:
new_name_cuts = new_name_cuts.replace(item[0], item[1])
new_name_cuts_log = new_name_cuts_log.replace(item[0], item[1])
new_name_final = new_name_final.replace(item[0], item[1])
canvas_cuts.SetName('{:s}_{:s}'.format(new_name_cuts, region))
canvas_cuts_log.SetName('{:s}_{:s}'.format(new_name_cuts_log, region))
canvas_final.SetName('{:s}_{:s}'.format(new_name_final, region))
self.output_handle.register_object(canvas_cuts)
self.output_handle.register_object(canvas_cuts_log)
self.output_handle.register_object(canvas_final)
def calculate_cut_efficiencies(self, cutflow, np_cutflow=None, process=None):
"""
Calculate cut efficiencies w.r.t to first yield
:param cutflow: cutflow yields
:type cutflow: pandas.DataFrame
:param cutflow: numpy array cutflow to be added to overall cutflow (cutflow)
:type cutflow: numpy.ndarray
:return: cutflow yields with efficiencies
:rtype: pandas.DataFrame
"""
def get_reference():
return current_process_cf['yield'][0]
current_process_cf = cutflow
if np_cutflow is not None:
current_process_cf = np_cutflow
try:
cut_efficiencies = [float(i) / float(get_reference()) for i in current_process_cf['yield']]
except ZeroDivisionError:
cut_efficiencies = [1.] * len(current_process_cf['yield'])
if self.percent_eff:
cut_efficiencies = [val * 100. for val in cut_efficiencies]
cut_efficiencies = ['{:.2f}'.format(val) for val in cut_efficiencies]
tag = 'eff'
if process is not None:
tag = 'eff_{:s}'.format(process)
return cutflow.assign(**{tag: cut_efficiencies})
def execute(self):
for systematic in self.systematics:
self.read_event_yields(systematic)
# self.plot_signal_yields()
if not self.raw:
for systematic in list(self.cutflows.keys()):
for region in list(self.cutflows[systematic].keys()):
self.apply_cross_section_weight(systematic, region)
if self.no_merge is False:
self.merge_yields()
# Need to remap names
for systematics in list(self.cutflows.keys()):
for region in list(self.cutflows[systematics].keys()):
for process in list(self.cutflows[systematics][region].keys()):
if not isinstance(process, Process):
continue
self.cutflows[systematics][region][process.process_name] = self.cutflows[systematics][region].pop(
process)
if len(self.modules) > 0:
self.update_top_background(self.modules[0])
if self.enable_signal_plots:
self.plot_signal_yields()
if not self.disable_sm_total:
self.calculate_sm_total()
self.make_cutflow_tables()
class CutflowAnalyser(CommonCutFlowAnalyser):
"""
Cutflow analyser
"""
def __init__(self, **kwargs):
kwargs.setdefault('output_file_name', None)
kwargs.setdefault('lumi', None)
kwargs.setdefault('process_configs', None)
kwargs.setdefault('no_merge', False)
kwargs.setdefault('raw', False)
kwargs.setdefault('format', 'plain')
super(CutflowAnalyser, self).__init__(**kwargs)
self.precision = 2 # TODO: quick term fix
        self.cutflow_hists = dict()
        self.cutflow_tables = dict()
        self.output_file_name = kwargs['output_file_name']
        self.systematics = kwargs['systematics']
        self.cutflows = dict()
        self.event_numbers = dict()
        self.raw = kwargs['raw']
        self.format = kwargs['format']
        self.merge = not kwargs['no_merge']
def apply_cross_section_weight(self):
for process in list(self.cutflow_hists.keys()):
if process.is_data:
continue
try:
lumi_weight = self.get_cross_section_weight(process)
except InvalidInputError:
_logger.error("None type parsed for ", self.cutflow_hists[process])
continue
for systematic in list(self.cutflow_hists[process].keys()):
for cutflow in list(self.cutflow_hists[process][systematic].values()):
scale(cutflow, lumi_weight)
def analyse_cutflow(self):
self.apply_cross_section_weight()
if self.process_configs is not None and self.merge:
self.merge_histograms(self.cutflow_hists)
if not self.disable_sm_total:
self.calculate_sm_total()
self.cutflow_hists = dict([kv for kv in iter(list(self.cutflow_hists.items())) if len(kv[1]) > 0])
for systematic in self.systematics:
self.cutflows[systematic] = dict()
for process in list(self.cutflow_hists.keys()):
self.cutflows[systematic][bytes(process)] = dict()
for k, v in list(self.cutflow_hists[process][systematic].items()):
if k.endswith('_raw'):
continue
raw_cutflow = self.cutflow_hists[process][systematic][k + '_raw']
self.cutflows[systematic][process][k] = self._analyse_cutflow(v, raw_cutflow)
self.calculate_cut_efficiencies()
def merge_histograms(self, histograms):
for process in list(histograms.keys()):
parent_process = find_process_config(process, self.process_configs).name
if parent_process is None:
continue
for systematic in list(histograms[process].keys()):
for selection in list(histograms[process][systematic].keys()):
if parent_process not in list(histograms.keys()):
histograms[parent_process] = dict((syst,
dict((sel, None) for sel in
list(histograms[process][syst].keys())))
for syst in list(histograms[process].keys()))
if selection not in histograms[process][systematic]:
_logger.warning("Could not find selection {:s} for process {:s}".format(selection,
process.process_name))
continue
new_hist_name = histograms[process][systematic][selection].GetName().replace(process.process_name,
parent_process)
if histograms[parent_process][systematic][selection] is None:
new_hist_name = histograms[process][systematic][selection].GetName().replace(
process.process_name, parent_process)
histograms[parent_process][systematic][selection] = histograms[process][systematic][
selection].Clone(new_hist_name)
else:
histograms[parent_process][systematic][selection].Add(
histograms[process][systematic][selection].Clone(new_hist_name))
histograms.pop(process)
def calculate_sm_total(self):
sm_total_cutflows = {}
for process, systematics in list(self.cutflow_hists.items()):
if 'data' in process.lower():
continue
for systematic, regions in list(systematics.items()):
if systematic not in list(sm_total_cutflows.keys()):
sm_total_cutflows[systematic] = {}
for region, cutflow_hist in list(regions.items()):
if region not in list(sm_total_cutflows[systematic].keys()):
sm_total_cutflows[systematic][region] = cutflow_hist.Clone()
continue
sm_total_cutflows[systematic][region].Add(cutflow_hist)
self.cutflow_hists['SMTotal'] = sm_total_cutflows
def _analyse_cutflow(self, cutflow_hist, raw_cutflow_hist):
if not self.raw:
parsed_info = np.array([(cutflow_hist.GetXaxis().GetBinLabel(b),
cutflow_hist.GetBinContent(b),
# raw_cutflow_hist.GetBinContent(b),
cutflow_hist.GetBinError(b),
# #raw_cutflow_hist.GetBinError(b),
-1.,
-1.) for b in range(1, cutflow_hist.GetNbinsX() + 1)],
dtype=[('cut', 'S100'), ('yield', 'f4'), # ('yield_raw', 'f4'),
('yield_unc', 'f4'),
('eff', float),
('eff_total', float)]) # todo: array dtype for string not a good choice
else:
parsed_info = np.array([(cutflow_hist.GetXaxis().GetBinLabel(b),
raw_cutflow_hist.GetBinContent(b),
raw_cutflow_hist.GetBinError(b),
-1.,
-1.) for b in range(1, cutflow_hist.GetNbinsX() + 1)],
dtype=[('cut', 'S100'), ('yield_raw', 'f4'),
('yield_unc_raw', 'f4'),
('eff', float),
('eff_total', float)]) # todo: array dtype for string not a good choice
return parsed_info
def calculate_cut_efficiencies(self):
for systematic in self.systematics:
for process in list(self.cutflows[systematic].keys()):
for cutflow in list(self.cutflows[systematic][process].values()):
self.calculate_cut_efficiency(cutflow)
def calculate_cut_efficiency(self, cutflow):
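        # Worked example (illustrative numbers): cut yields [200., 100., 30.]
        # give per-cut efficiencies [100., 50., 30.] ('eff') and cumulative
        # efficiencies [100., 50., 15.] ('eff_total'), both in percent.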
yield_str = 'yield'
if self.raw:
yield_str = 'yield_raw'
for i in range(len(cutflow['cut'])):
if i == 0:
cutflow[i]['eff'] = 100.
cutflow[i]['eff_total'] = 100.
continue
if cutflow[i - 1][yield_str] != 0.:
cutflow[i]['eff'] = round(100.0 * cutflow[i][yield_str] / cutflow[i - 1][yield_str], 3)
else:
cutflow[i]['eff'] = -1.
if cutflow[0][yield_str] != 0.:
cutflow[i]['eff_total'] = round(100.0 * cutflow[i][yield_str] / cutflow[0][yield_str], 3)
else:
cutflow[i]['eff_total'] = -1.
def make_cutflow_tables(self):
for systematic in self.systematics:
self.make_cutflow_table(systematic)
def make_cutflow_table(self, systematic):
cutflow_tables = OrderedDict()
# signal_yields = dict(filter(lambda cf: cf[0] in map(lambda prc: prc.name, signal_processes),
# cutflows.iteritems()))
for process in list(self.cutflows[systematic].keys()):
for selection, cutflow in list(self.cutflows[systematic][process].items()):
cutflow_tmp = self.stringify(cutflow)
if selection not in list(cutflow_tables.keys()):
cutflow_tables[selection] = cutflow_tmp
continue
cutflow_tables[selection] = rec_append_fields(cutflow_tables[selection],
[i + process for i in cutflow_tmp.dtype.names[1:]],
[cutflow_tmp[n] for n in cutflow_tmp.dtype.names[1:]])
headers = ['Cut'] + [x for elem in list(self.cutflows[systematic].keys()) for x in (elem, '')]
self.cutflow_tables = {k: tabulate(v.transpose(),
headers=headers,
tablefmt=self.format) # floatfmt='.2f'
for k, v in list(cutflow_tables.items())}
def stringify(self, cutflow):
def format_yield(value, uncertainty):
if value > 10000.:
return bytes('{:.{:d}e}'.format(value, self.precision))
else:
return bytes('{:.{:d}f}'.format(value, self.precision))
# if value > 10000.:
# return "{:.3e} +- {:.3e}".format(value, uncertainty)
# else:
# return "{:.2f} +- {:.2f}".format(value, uncertainty)
if not self.raw:
cutflow = np.array([(cutflow[i]['cut'],
format_yield(cutflow[i]['yield'], cutflow[i]['yield_unc']),
# cutflow[i]['eff'],
cutflow[i]['eff_total']) for i in range(len(cutflow))],
dtype=[('cut', 'S100'), ('yield', 'S100'), ('eff_total', float)]) # ('eff', float),
else:
cutflow = np.array([(cutflow[i]['cut'],
format_yield(cutflow[i]['yield_raw'], cutflow[i]['yield_unc_raw']),
# cutflow[i]['eff'],
cutflow[i]['eff_total']) for i in range(len(cutflow))],
dtype=[('cut', 'S100'), ('yield_raw', 'S100'), ('eff_total', float)]) # ('eff', float),
return cutflow
def store_cutflow_table(self):
pass
def load_cutflows(self, file_handle):
process = file_handle.process
if process is None:
_logger.error("Parsed NoneType process from {:s}".format(file_handle.file_name))
return
if process not in self.event_numbers and not self.disable_cutflow_reading:
self.event_numbers[process] = file_handle.get_number_of_total_events()
elif not self.disable_cutflow_reading:
self.event_numbers[process] += file_handle.get_number_of_total_events()
if process not in list(self.cutflow_hists.keys()):
self.cutflow_hists[process] = dict()
for systematic in self.systematics:
cutflow_hists = file_handle.get_objects_by_pattern("^(cutflow_)", systematic)
if systematic not in self.cutflow_hists[process]:
self.cutflow_hists[process][systematic] = dict()
for cutflow_hist in cutflow_hists:
cutflow_hist.SetDirectory(0)
try:
self.cutflow_hists[process][systematic][cutflow_hist.GetName().replace("cutflow_", "")].Add(
cutflow_hist)
except KeyError:
self.cutflow_hists[process][systematic][
cutflow_hist.GetName().replace("cutflow_", "")] = cutflow_hist
def read_cutflows(self):
for file_handle in self.file_handles:
self.load_cutflows(file_handle)
def plot_cutflow(self):
if self.output_handle is None:
return
set_batch_mode(True)
flipped = defaultdict(lambda: defaultdict(dict))
for process, systematics in list(self.cutflow_hists.items()):
for systematic, cutflows in list(systematics.items()):
if "smtotal" in process.lower():
continue
for region, cutflow_hist in list(cutflows.items()):
flipped[systematic][region][process] = cutflow_hist
plot_config = PlotConfig(name=None, dist=None, ytitle="Events", logy=True)
for region in list(flipped['Nominal'].keys()):
plot_config.name = "{:s}_cutflow".format(region)
cutflow_hists = {process: hist for process, hist in list(flipped["Nominal"][region].items())
if "smtotal" not in process.lower()}
for process, cutflow_hist in list(cutflow_hists.items()):
cutflow_hist.SetName("{:s}_{:s}".format(cutflow_hist.GetName(), process))
cutflow_canvas = Pt.plot_stack(cutflow_hists, plot_config, process_configs=self.process_configs)
Ft.add_legend_to_canvas(cutflow_canvas, process_configs=self.process_configs)
self.output_handle.register_object(cutflow_canvas)
def execute(self):
self.read_cutflows()
self.analyse_cutflow()
self.make_cutflow_tables()
if hasattr(self, "output_handle"):
self.plot_cutflow()
| mit |
jhugon/astroobsplanner | astroobsplanner/makeplan.py | 1 | 16714 | #!/usr/bin/env python3
# vim: set fileencoding=utf-8
import sys
import argparse
import datetime
import numpy
import pytz
from matplotlib import pyplot as mpl
from matplotlib.dates import HourLocator, DateFormatter
from matplotlib.backends.backend_pdf import PdfPages
from astropy.time import Time
from astropy.table import Table
import astropy.units as u
from astroplan import Observer, FixedTarget, AltitudeConstraint, AirmassConstraint, AtNightConstraint, MoonSeparationConstraint, MoonIlluminationConstraint
from astroplan import months_observable, is_always_observable, is_observable
from astroplan.utils import time_grid_from_range
from .lookuptarget import lookuptarget, lookuptargettype, CALDWELL_MAP
def makeTargetLabels(nameList,args):
targetTypes = [lookuptargettype(name) for name in nameList]
result = []
seperator = " "
ylabelsize = "x-small"
if len(nameList) < 25:
seperator = "\n"
ylabelsize = "medium"
for x,t in zip(nameList,targetTypes):
thisResult = ""
try:
othername = CALDWELL_MAP[x.lower()]
except KeyError:
thisResult += x
else:
if ("ngc" in othername) or ("ic" in othername):
othername = othername.upper()
thisResult += f"{x} ({othername})"
if args.showType:
thisResult += f"{seperator}{t[0]}"
result.append(thisResult)
return result, ylabelsize
def run_months(observers, nameList, args):
targets = [FixedTarget(coord=lookuptarget(name),name=name) for name in nameList]
targetLabelList, ylabelsize = makeTargetLabels(nameList,args)
constraints = [
AltitudeConstraint(min=args.minAlt*u.deg),
AtNightConstraint.twilight_astronomical(),
]
outfn = args.outFileNameBase+"_monthly.pdf"
with PdfPages(outfn) as pdf:
for observer in observers:
observability_months_table = months_observable(constraints,observer,targets,time_grid_resolution=1*u.hour)
observability_months_grid = numpy.zeros((len(targets),12))
for i, observable in enumerate(observability_months_table):
for jMonth in range(1,13):
observability_months_grid[i,jMonth-1] = jMonth in observable
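            # At this point observability_months_grid[i, j] is 1.0 when target i
            # satisfies the constraints at some time during month j+1, else 0.0.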
observable_targets = targets
observable_target_labels = targetLabelList
ever_observability_months_grid = observability_months_grid
if args.onlyEverObservable:
target_is_observable = numpy.zeros(len(targets))
for iMonth in range(observability_months_grid.shape[1]):
target_is_observable += observability_months_grid[:,iMonth]
target_is_observable = target_is_observable > 0. # change to boolean numpy array
observable_targets = [x for x, o in zip(targets,target_is_observable) if o]
observable_target_labels = [x for x, o in zip(targetLabelList,target_is_observable) if o]
ever_observability_months_grid = observability_months_grid[target_is_observable,:]
fig, ax = mpl.subplots(
figsize=(8.5,11),
gridspec_kw={
"top":0.92,
"bottom":0.03,
"left":0.13,
"right":0.98,
},
tight_layout=False,constrained_layout=False
)
extent = [-0.5, -0.5+12, -0.5, len(observable_targets)-0.5]
ax.imshow(ever_observability_months_grid, extent=extent, origin="lower", aspect="auto", cmap=mpl.get_cmap("Greens"))
ax.xaxis.tick_top()
ax.invert_yaxis()
ax.set_yticks(range(0,len(observable_targets)))
ax.set_yticklabels(observable_target_labels, fontsize=ylabelsize)
ax.set_xticks(range(12))
ax.set_xticklabels(["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"])
ax.set_xticks(numpy.arange(extent[0], extent[1]), minor=True)
ax.set_yticks(numpy.arange(extent[2], extent[3]), minor=True)
ax.grid(which="minor",color="black",ls="-", linewidth=1)
ax.tick_params(axis='y', which='minor', left=False, right=False)
ax.tick_params(axis='x', which='minor', bottom=False, top=False)
fig.suptitle(f"Monthly Observability at {observer.name}")
fig.text(1.0,0.0,"Constraints: Astronomical Twilight, Altitude $\geq {:.0f}^\circ$".format(args.minAlt),ha="right",va="bottom")
pdf.savefig(fig)
print(f"Writing out file: {outfn}")
def run_nights(observers, nameList, args):
# Define range of times to observe between
startDate = datetime.datetime.strptime(args.startDate,"%Y-%m-%d")
beginTimeFirstNight = datetime.datetime(startDate.year,startDate.month,startDate.day,hour=16)
endTimeFirstNight = beginTimeFirstNight + datetime.timedelta(hours=16)
t_datetimes_nights_list = []
for iDay in range(args.nNights):
beginTime = beginTimeFirstNight + datetime.timedelta(days=iDay)
endTime = endTimeFirstNight + datetime.timedelta(days=iDay)
currTime = beginTime
t_datetime = []
while currTime <= endTime:
t_datetime.append(currTime)
currTime += datetime.timedelta(hours=1)
t_datetimes_nights_list.append(t_datetime)
targets = [FixedTarget(coord=lookuptarget(name),name=name) for name in nameList]
targetLabelList, ylabelsize = makeTargetLabels(nameList,args)
constraints = [
AltitudeConstraint(min=args.minAlt*u.deg),
AtNightConstraint.twilight_astronomical(),
MoonSeparationConstraint(min=args.minMoonSep*u.deg),
MoonIlluminationConstraint(max=args.maxMoonIllum),
]
outfn = args.outFileNameBase+"_nightly.pdf"
with PdfPages(outfn) as pdf:
for observer in observers:
fig, axes = mpl.subplots(
figsize=(8.5,11),
ncols=args.nNights,
sharex="col",
gridspec_kw={
"top":0.92,
"bottom":0.03,
"left":0.13,
"right":0.98,
"hspace":0,
"wspace":0
},
tight_layout=False,constrained_layout=False
)
observability_grids = []
for iNight in range(args.nNights):
t_datetime = t_datetimes_nights_list[iNight]
time_grid = [Time(observer.timezone.localize(t)) for t in t_datetime]
observability_grid = numpy.zeros((len(targets),len(time_grid)-1))
for i in range(len(time_grid)-1):
tmp = is_always_observable(constraints, observer, targets, times=[time_grid[i],time_grid[i+1]])
observability_grid[:, i] = tmp
observability_grids.append(observability_grid)
observable_targets = targets
observable_target_labels = targetLabelList
ever_observability_grids = observability_grids
if args.onlyEverObservable:
target_is_observable = numpy.zeros(len(targets))
for observability_grid in observability_grids:
for iTime in range(observability_grid.shape[1]):
target_is_observable += observability_grid[:,iTime]
target_is_observable = target_is_observable > 0. # change to boolean numpy array
observable_targets = [x for x, o in zip(targets,target_is_observable) if o]
observable_target_labels = [x for x, o in zip(targetLabelList,target_is_observable) if o]
ever_observability_grids = []
for observability_grid in observability_grids:
ever_observability_grid = observability_grid[target_is_observable,:]
ever_observability_grids.append(ever_observability_grid)
for iNight in range(args.nNights):
ax = axes[iNight]
t_datetime = t_datetimes_nights_list[iNight]
extent = [0, len(t_datetime)-1, -0.5, len(observable_targets)-0.5]
ax.imshow(ever_observability_grids[iNight], extent=extent, origin="lower", aspect="auto", cmap=mpl.get_cmap("Greens"))
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
ax.invert_yaxis()
if iNight == 0:
ax.set_yticks(range(0,len(observable_targets)))
ax.set_yticklabels(observable_target_labels, fontsize=ylabelsize)
else:
ax.set_yticks([])
ax.set_xticks(range(0,len(t_datetime)-1,4))
ax.set_xticks(range(0,len(t_datetime)),minor=True)
ax.set_xticklabels([t_datetime[i].strftime("%Hh") for i in range(0,len(t_datetime)-1,4)])
ax.set_xlabel(t_datetime[0].strftime("%a %b %d"))
ax.set_yticks(numpy.arange(extent[2], extent[3]), minor=True)
ax.grid(axis="x",which="minor",color="0.7",ls="-", linewidth=0.5)
ax.grid(axis="x",which="major",color="0.7",ls="-", linewidth=1)
ax.grid(axis="y",which="minor",color="0.7",ls="-", linewidth=0.5)
ax.tick_params(axis='y', which='minor', left=False, right=False)
ax.tick_params(axis='x', which='minor', bottom=False, top=False)
fig.suptitle(f"Observability at {observer.name} in {startDate.year}")
fig.text(1.0,0.0,"Constraints: Astronomical Twilight, Altitude $\geq {:.0f}^\circ$, Moon Seperation $\geq {:.0f}^\circ$, Moon Illumination $\leq {:.2f}$".format(args.minAlt,args.minMoonSep,args.maxMoonIllum),ha="right",va="bottom")
pdf.savefig(fig)
print(f"Writing out file: {outfn}")
def main():
parser = argparse.ArgumentParser(description="Makes observability tables. Best to include less than 100 or so targets")
parser.add_argument("outFileNameBase",help="Output file name base (will end in _monthly.pdf for month chart, etc.")
parser.add_argument("objectNames",nargs='*',help='Object name (e.g. "M42" "Polaris" "Gam Cru" "Orion Nebula")')
parser.add_argument("--textFileObjNames",'-t',help="A newline seperated list of object names is in the text file. Funcions just like extra objectNames")
parser.add_argument("--monthly",'-m',action="store_true",help="Make monthly visibility, otherwise, run nightly chart")
parser.add_argument("--startDate",'-s',default=str(datetime.date.today()),help=f"Start date in ISO format YYYY-MM-DD (default: today, {datetime.date.today()})")
parser.add_argument("--nNights",'-n',type=int,default=5,help=f"Number of nights to show including STARTDATE (default: 5)")
parser.add_argument("--minAlt",'-a',type=float,default=45,help=f"Minimum altitude constraint, in degrees (default: 45)")
parser.add_argument("--minMoonSep",type=float,default=60,help=f"Minimum angular seperation between target and moon constraint, in degrees (default: 60)")
parser.add_argument("--maxMoonIllum",type=float,default=0.05,help=f"Maximum fractional moon illumination constraint: a float between 0.0 and 1.0. Also satisfied if moon has set. (default: 0.05)")
parser.add_argument("--onlyEverObservable",'-o',action="store_true",help="For each site, only display objects that are ever observable in the time range (get rid of empty rows)")
parser.add_argument("--showType",action="store_true",help="For each object, list the main type returned from looking it up in SIMBAD in the tables")
parser.add_argument("--printObjectLists","-p",action="store_true",help="Print out Messier and Caldwell catalogues.")
parser.add_argument("--GlCl",action="store_true",help="Run all globular clusters from Messier and Caldwell catalogues")
parser.add_argument("--OpCl",action="store_true",help="Run all open clusters from Messier and Caldwell catalogues")
parser.add_argument("--G",'-g',action="store_true",help="Run all galaxies from Messier and Caldwell catalogues")
parser.add_argument("--PN",action="store_true",help="Run all planatary nebulae from Messier and Caldwell catalogues")
parser.add_argument("--Other",action="store_true",help="Run everything else from Messier and Caldwell catalogues")
parser.add_argument("--HCG",action="store_true",help="Run all of Hickson's Compact Groups of galaxies")
args = parser.parse_args()
observers = [
Observer(name="NM Skies",latitude=32.903308333333335*u.deg,longitude=-106.96066666666667*u.deg,elevation=2225.*u.meter,timezone='US/Mountain'),
Observer(name="Sierra Remote Obs., CA",latitude=37.0703*u.deg,longitude=-119.4128*u.deg,elevation=1405.*u.meter,timezone='US/Pacific'),
Observer(name="AstroCamp, Spain",latitude=38.15*u.deg,longitude=-2.31*u.deg,elevation=1650.*u.meter,timezone='Europe/Madrid'),
Observer(name="Siding Spring, AUS",latitude=-31.27333*u.deg,longitude=149.064444*u.deg,elevation=1165.*u.meter,timezone='Australia/Melbourne'),
]
messierAndCaldwellNames = ["M"+str(i) for i in range(1,111)]+["C"+str(i) for i in range(1,110)]
messierAndCaldwellTypes = [lookuptargettype(name) for name in messierAndCaldwellNames]
messierAndCaldwellGlClNames = [n for n,t in zip(messierAndCaldwellNames,messierAndCaldwellTypes) if "GlCl" in t] # globular clusters
messierAndCaldwellOpClNames = [n for n,t in zip(messierAndCaldwellNames,messierAndCaldwellTypes) if "OpCl" == t[0]] # main type open cluster
messierAndCaldwellGNames = [n for n,t in zip(messierAndCaldwellNames,messierAndCaldwellTypes) if "G" in t and not ("GlCl" == t[0]) and not ("OpCl" == t[0]) and not ("PN" == t[0]) and not ("SNR" == t[0]) and not ("HII" == t[0])] # galaxies
messierAndCaldwellPNNames = [n for n,t in zip(messierAndCaldwellNames,messierAndCaldwellTypes) if "PN" in t] # planetary nebulae
messierAndCaldwellNotGNorGlClNorOpClNorPNNames = [n for n,t in zip(messierAndCaldwellNames,messierAndCaldwellTypes) if not (("G" in t) or ("GlCl" in t) or ("OpCl" == t[0]) or ("PN" in t))] # not galaxies nor globular clusters nor main type open cluster nor planetary nebula
HCGNames = ["HCG"+str(i) for i in range(1,101)] # Hickson's Compact Groups of galaxies
if args.printObjectLists:
print(f"GlCl: {len(messierAndCaldwellGlClNames)}")
for name in messierAndCaldwellGlClNames:
print(f" {name}: {lookuptargettype(name)}")
print(f"OpCl: {len(messierAndCaldwellOpClNames)}")
for name in messierAndCaldwellOpClNames:
print(f" {name}: {lookuptargettype(name)}")
print(f"G: {len(messierAndCaldwellGNames)}")
for name in messierAndCaldwellGNames:
print(f" {name}: {lookuptargettype(name)}")
print(f"PN: {len(messierAndCaldwellPNNames)}")
for name in messierAndCaldwellPNNames:
print(f" {name}: {lookuptargettype(name)}")
print(f"Not G nor GlCl nor OpCl nor PN: {len(messierAndCaldwellNotGNorGlClNorOpClNorPNNames)}")
for name in messierAndCaldwellNotGNorGlClNorOpClNorPNNames:
print(f" {name}: {lookuptargettype(name)}")
print(f"Hickson's Compact Groups of galaxies:")
for name in HCGNames:
print(f" {name}: {lookuptargettype(name)}")
sys.exit(0)
nameList = args.objectNames
if args.textFileObjNames:
print(f"Reading object names from: '{args.textFileObjNames}'")
try:
with open(args.textFileObjNames) as infile:
for line in infile.readlines():
nameList.append(line.strip("\n"))
except FileNotFoundError as e:
print(f"Error: {e}, exiting.")
sys.exit(1)
messierAndCaldwellNamesToUse = []
if args.GlCl:
messierAndCaldwellNamesToUse += messierAndCaldwellGlClNames
if args.OpCl:
messierAndCaldwellNamesToUse += messierAndCaldwellOpClNames
if args.G:
messierAndCaldwellNamesToUse += messierAndCaldwellGNames
if args.PN:
messierAndCaldwellNamesToUse += messierAndCaldwellPNNames
if args.Other:
messierAndCaldwellNamesToUse += messierAndCaldwellNotGNorGlClNorOpClNorPNNames
#messierAndCaldwellNamesToUse.sort() #need number sort not lexical
nameList += messierAndCaldwellNamesToUse
if args.HCG:
nameList += HCGNames
if args.monthly:
run_months(observers, nameList, args)
run_nights(observers, nameList, args)
| gpl-3.0 |
shenzebang/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/reshape/tile.py | 4 | 13776 | """
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
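            # Worked example (illustrative): x spanning [0, 10] with bins=4 and
            # right=True gives edges [-0.01, 2.5, 5.0, 7.5, 10.0]; the 0.1%
            # shift keeps min(x) inside the first half-open interval.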
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
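# Illustrative sketch, never called here: for an integer `q`, `qcut` above
# asks for q + 1 evenly spaced quantile levels and uses the corresponding
# sample quantiles as bin edges. The helper below mimics that with
# numpy.percentile; the interpolation used by `algos.quantile` may differ
# slightly, so treat the result as an approximation.
def _demo_quantile_edges(values, q):
    import numpy as np
    levels = np.linspace(0, 1, q + 1)   # e.g. q=4 -> [0, .25, .5, .75, 1]
    return np.percentile(np.asarray(values, dtype=float), levels * 100)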
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
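# Illustrative sketch, never called here: the core of `_bins_to_cuts` is a
# `searchsorted` against the bin edges. For right-closed bins the search side
# is 'left', so a value equal to an edge is assigned to the bin ending at
# that edge, and `include_lowest` then sets the index of values equal to the
# first edge to 1 so they land in the first bin instead of being treated as
# out of bounds. The raw indices for a tiny example:
def _demo_searchsorted_binning():
    import numpy as np
    edges = np.array([0.0, 1.0, 2.0, 3.0])
    x = np.array([0.0, 0.5, 1.0, 2.5, 3.0])
    right_closed = edges.searchsorted(x, side='left')   # [0, 1, 1, 3, 3]
    left_closed = edges.searchsorted(x, side='right')   # [1, 1, 2, 3, 4]
    return right_closed, left_closed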
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
    bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
x = np.asarray(x)
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
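# Illustrative sketch, never called here: `_infer_precision` keeps raising
# the displayed precision until rounding no longer makes two edges collide.
# For edges that only differ in the fourth decimal place, a base precision
# of 3 is therefore expected to be bumped to 4 (the values are arbitrary).
def _demo_infer_precision():
    import numpy as np
    edges = np.asarray([0.12341, 0.12349, 0.5, 1.0])
    return _infer_precision(3, edges)   # expected: 4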
| apache-2.0 |
xubenben/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
sameera2004/xray-vision | xray_vision/mpl_plotting/utils.py | 3 | 2478 | from __future__ import absolute_import, division, print_function
import numpy as np
def multiline(ax, data, labels, line_kw=None, xlabels=None, ylabels=None):
"""Plot a number of datasets on their own line_artist
Parameters
----------
ax : iterable
List of mpl.Axes objects
data : list
        If the data is 1-D or Nx1, it is treated as y-values plotted against
        their index. If the data is Nx2, it is treated as (x, y) pairs.
labels : list
Names of the data sets. These will appear as the legend in each plot
line_kw : dict
Dictionary of kwargs to be passed to **all** of the plotting functions.
xlabels : iterable or string, optional
The name of the x axes. If an iterable is passed in, it should be the
same length as `data`. If a string is passed in, it is assumed that all
'data' should have the same `x` axis
ylabels : iterable or string, optional
Same as `xlabels`.
Returns
-------
arts : list
        List of matplotlib.lines.Line2D objects. These objects can be
used for further manipulation of the plot
"""
if line_kw is None:
line_kw = {}
arts = []
# handle the xlabels
if xlabels is None:
xlabels = [''] * len(data)
if ylabels is None:
ylabels = [''] * len(data)
    if isinstance(xlabels, str):
        xlabels = [xlabels] * len(data)
    if isinstance(ylabels, str):
        ylabels = [ylabels] * len(data)
def to_xy(d, label):
shape = d.shape
if len(shape) == 1:
return range(len(d)), d
elif len(shape) == 2:
            if shape[0] == 1:
                return range(shape[1]), d[0]
            elif shape[1] == 1:
                return range(len(d)), d[:, 0]
elif shape[0] == 2:
return d[0], d[1]
elif shape[1] == 2:
return d[:, 0], d[:, 1]
raise ValueError('data set "%s" has a shape I do not '
'understand. Expecting shape (N), (Nx1), '
'(1xN), (Nx2) or (2xN). I got %s' % (label, shape))
for ax, d, label, xlabel, ylabel in zip(ax, data, labels, xlabels, ylabels):
d = np.asarray(d)
x, y = to_xy(d, label)
art, = ax.plot(x, y, label=label, **line_kw)
arts.append(art)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
return arts
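# Illustrative usage sketch (hypothetical data, never executed on import):
# build one Axes per data set and hand them to `multiline` together with the
# data and labels. All names and values below are placeholders.
def _demo_multiline():
    import numpy as np
    import matplotlib.pyplot as plt
    data = [np.c_[np.arange(10), np.arange(10) ** 2],   # Nx2 -> (x, y) pairs
            np.sin(np.linspace(0, 6, 50))]              # 1-D -> y vs. index
    fig, axes = plt.subplots(len(data), 1)
    arts = multiline(axes, data, labels=['quadratic', 'sine'],
                     xlabels='index', ylabels=['y1', 'y2'])
    return fig, arts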
| bsd-3-clause |
mpa46/PiNN_Caffe2 | ac_qv_api.py | 3 | 12685 | import caffe2_paths
import os
import pickle
from caffe2.python import (
workspace, layer_model_helper, schema, optimizer, net_drawer
)
import caffe2.python.layer_model_instantiator as instantiator
import numpy as np
from pinn.adjoint_mlp_lib import build_adjoint_mlp, init_model_with_schemas
import pinn.data_reader as data_reader
import pinn.preproc as preproc
import pinn.parser as parser
import pinn.visualizer as visualizer
import pinn.exporter as exporter
# import logging
import matplotlib.pyplot as plt
class ACQVModel:
def __init__(
self,
model_name,
input_dim=1,
output_dim=1,
):
self.model_name = model_name
self.input_dim = input_dim
self.output_dim = output_dim
self.model = init_model_with_schemas(
model_name, self.input_dim, self.output_dim)
self.input_data_store = {}
self.preproc_param = {}
self.net_store = {}
self.reports = {'epoch':[],'train_loss':[], 'eval_loss':[]}
def add_data(
self,
data_tag,
data_arrays,
preproc_param,
override=True,
):
'''
data_arrays are in the order of origin_input, adjoint_label
origin_input and adjoint_label must be numpy arrays
'''
#check length and dimensions of origin input and adjoint label
assert len(data_arrays) == 2, 'Incorrect number of input data'
voltages = data_arrays[0]
capas = data_arrays[1]
        assert voltages.shape == capas.shape, 'Mismatched dimensions'
#Set preprocess parameters and database name
self.preproc_param = preproc_param
self.pickle_file_name = self.model_name + '_preproc_param' + '.p'
db_name = self.model_name + '_' + data_tag + '.minidb'
if os.path.isfile(db_name):
if override:
print("XXX Delete the old database...")
os.remove(db_name)
os.remove(self.pickle_file_name)
else:
                raise Exception('Encountered a database with the same name. ' +
                    'Choose another model name or set override to True.')
print("+++ Create a new database...")
self.preproc_param.setdefault('max_loss_scale', 1.)
pickle.dump(
self.preproc_param,
open(self.pickle_file_name, 'wb')
)
#Preprocess the data
voltages, capas = preproc.ac_qv_preproc(
voltages, capas,
self.preproc_param['scale'],
self.preproc_param['vg_shift']
)
# Only expand the dim if the number of dimension is 1
origin_input = np.expand_dims(
voltages, axis=1) if voltages.ndim == 1 else voltages
adjoint_label = np.expand_dims(
capas, axis=1) if capas.ndim == 1 else capas
# Create adjoint_input data
adjoint_input = np.ones((origin_input.shape[0], 1))
# Set the data type to np float for origin input, adjoint input, adjoint label
origin_input = origin_input.astype(np.float32)
adjoint_input = adjoint_input.astype(np.float32)
adjoint_label = adjoint_label.astype(np.float32)
# Write to database
data_reader.write_db(
'minidb', db_name,
[origin_input, adjoint_input, adjoint_label]
)
self.input_data_store[data_tag] = [db_name, origin_input.shape[0]]
preproc.restore_voltages(
self.preproc_param['scale'],
self.preproc_param['vg_shift'],
voltages
)
def build_nets(
self,
hidden_dims,
batch_size=1,
optim_method = 'AdaGrad',
optim_param = {'alpha':0.01, 'epsilon':1e-4},
):
assert len(self.input_data_store) > 0, 'Input data store is empty.'
assert 'train' in self.input_data_store, 'Missing training data.'
self.batch_size = batch_size
        # Build the data reader net for the train net
input_data_train = data_reader.build_input_reader(
self.model,
self.input_data_store['train'][0],
'minidb',
['origin_input', 'adjoint_input', 'label'],
batch_size=batch_size,
data_type='train',
)
if 'eval' in self.input_data_store:
# Build the data reader net for eval net
input_data_eval = data_reader.build_input_reader(
self.model,
self.input_data_store['eval'][0],
'minidb',
['origin_input', 'adjoint_input'],
batch_size=batch_size,
data_type='eval',
)
# Build the computational nets
# Create train net
self.model.input_feature_schema.origin_input.set_value(
input_data_train[0].get(), unsafe=True)
self.model.input_feature_schema.adjoint_input.set_value(
input_data_train[1].get(), unsafe=True)
self.model.trainer_extra_schema.label.set_value(
input_data_train[2].get(), unsafe=True)
self.origin_pred, self.adjoint_pred, self.loss = build_adjoint_mlp(
self.model,
input_dim = self.input_dim,
hidden_dims = hidden_dims,
output_dim = self.output_dim,
optim=_build_optimizer(
optim_method, optim_param),
)
train_init_net, train_net = instantiator.generate_training_nets(self.model)
workspace.RunNetOnce(train_init_net)
workspace.CreateNet(train_net)
self.net_store['train_net'] = train_net
pred_net = instantiator.generate_predict_net(self.model)
workspace.CreateNet(pred_net)
self.net_store['pred_net'] = pred_net
if 'eval' in self.input_data_store:
# Create eval net
self.model.input_feature_schema.origin_input.set_value(
input_data_eval[0].get(), unsafe=True)
self.model.input_feature_schema.adjoint_input.set_value(
input_data_eval[1].get(), unsafe=True)
self.model.trainer_extra_schema.label.set_value(
input_data_eval[2].get(), unsafe=True)
eval_net = instantiator.generate_eval_net(self.model)
workspace.CreateNet(eval_net)
self.net_store['eval_net'] = eval_net
def train_with_eval(
self,
num_epoch=1,
report_interval=0,
eval_during_training=False,
):
''' Fastest mode: report_interval = 0
Medium mode: report_interval > 0, eval_during_training=False
Slowest mode: report_interval > 0, eval_during_training=True
'''
num_batch_per_epoch = int(
self.input_data_store['train'][1] /
self.batch_size
)
if not self.input_data_store['train'][1] % self.batch_size == 0:
num_batch_per_epoch += 1
            print('[Warning]: batch_size does not evenly divide the data. ' +
                  'Running on {} examples instead of {}'.format(
num_batch_per_epoch * self.batch_size,
self.input_data_store['train'][1]
)
)
        print('<<< Run {} iterations'.format(num_epoch * num_batch_per_epoch))
train_net = self.net_store['train_net']
if report_interval > 0:
print('>>> Training with Reports')
num_eval = int(num_epoch / report_interval)
num_unit_iter = int((num_batch_per_epoch * num_epoch)/num_eval)
if eval_during_training and 'eval_net' in self.net_store:
print('>>> Training with Eval Reports (Slowest mode)')
eval_net = self.net_store['eval_net']
for i in range(num_eval):
workspace.RunNet(
train_net.Proto().name,
num_iter=num_unit_iter
)
self.reports['epoch'].append((i + 1) * report_interval)
train_loss = np.asscalar(schema.FetchRecord(self.loss).get())
self.reports['train_loss'].append(train_loss)
if eval_during_training and 'eval_net' in self.net_store:
workspace.RunNet(
eval_net.Proto().name,
num_iter=num_unit_iter)
eval_loss = np.asscalar(schema.FetchRecord(self.loss).get())
self.reports['eval_loss'].append(eval_loss)
else:
print('>>> Training without Reports (Fastest mode)')
num_iter = num_epoch*num_batch_per_epoch
workspace.RunNet(
train_net,
num_iter=num_iter
)
print('>>> Saving test model')
exporter.save_net(
self.net_store['pred_net'],
self.model,
self.model_name+'_init', self.model_name+'_predict'
)
def draw_nets(self):
for net_name in self.net_store:
net = self.net_store[net_name]
graph = net_drawer.GetPydotGraph(net.Proto().op, rankdir='TB')
with open(net.Name() + ".png",'wb') as f:
f.write(graph.create_png())
def predict_qs(self, voltages):
        # requires voltages to be a numpy array of size
        # (batch size, input_dimension);
        # the first dimension is Vg and the second dimension is Vd
# preprocess the origin input and create adjoint input
# voltages array is unchanged
if len(self.preproc_param) == 0:
self.preproc_param = pickle.load(
open(self.pickle_file_name, "rb" )
)
dummy_qs = np.zeros(voltages[0].shape[0])
voltages, dummy_qs = preproc.ac_qv_preproc(
voltages, dummy_qs,
self.preproc_param['scale'],
self.preproc_param['vg_shift']
)
adjoint_input = np.ones((voltages[0].shape[0], 1))
# Expand dimensions of input and set data type of inputs
origin_input = np.expand_dims(
voltages, axis=1)
origin_input = origin_input.astype(np.float32)
adjoint_input = adjoint_input.astype(np.float32)
workspace.FeedBlob('DBInput_train/origin_input', origin_input)
workspace.FeedBlob('DBInput_train/adjoint_input', adjoint_input)
pred_net = self.net_store['pred_net']
workspace.RunNet(pred_net)
qs = np.squeeze(schema.FetchRecord(self.origin_pred).get())
gradients = np.squeeze(schema.FetchRecord(self.adjoint_pred).get())
restore_integral_func, restore_gradient_func = preproc.get_restore_q_func(
self.preproc_param['scale'],
self.preproc_param['vg_shift']
)
original_qs = restore_integral_func(qs)
original_gradients = restore_gradient_func(gradients)
preproc.restore_voltages(
self.preproc_param['scale'],
self.preproc_param['vg_shift'],
voltages
)
return qs, original_qs, gradients, original_gradients
def plot_loss_trend(self):
plt.plot(self.reports['epoch'], self.reports['train_loss'])
if len(self.reports['eval_loss']) > 0:
plt.plot(self.reports['epoch'], self.reports['eval_loss'], 'r--')
plt.show()
# --------------------------------------------------------
# ---------------- Global functions -------------------
# --------------------------------------------------------
def predict_qs(model_name, voltages):
workspace.ResetWorkspace()
    # requires voltages to be a numpy array of size
    # (batch size, input_dimension);
    # the first dimension is Vg and the second dimension is Vd
# preprocess the origin input and create adjoint input
preproc_param = pickle.load(
open(model_name+'_preproc_param.p', "rb" )
)
dummy_qs = np.zeros(voltages[0].shape[0])
voltages, dummy_qs = preproc.ac_qv_preproc(
voltages, dummy_qs,
preproc_param['scale'],
preproc_param['vg_shift']
)
adjoint_input = np.ones((voltages[0].shape[0], 1))
# Expand dimensions of input and set data type of inputs
origin_input = np.expand_dims(
voltages, axis=1)
origin_input = origin_input.astype(np.float32)
adjoint_input = adjoint_input.astype(np.float32)
    workspace.FeedBlob('DBInput_train/origin_input', origin_input)
workspace.FeedBlob('DBInput_train/adjoint_input', adjoint_input)
pred_net = exporter.load_net(model_name+'_init', model_name+'_predict')
workspace.RunNet(pred_net)
qs = np.squeeze(schema.FetchBlob('prediction'))
gradients = np.squeeze(schema.FetchBlob('adjoint_prediction'))
restore_integral_func, restore_gradient_func = preproc.get_restore_q_func(
preproc_param['scale'],
preproc_param['vg_shift']
)
original_qs = restore_integral_func(qs)
original_gradients = restore_gradient_func(gradients)
preproc.restore_voltages(
        preproc_param['scale'],
        preproc_param['vg_shift'],
voltages
)
return qs, original_qs, gradients, original_gradients
def plot_iv(
vg, vd, ids,
vg_comp = None, vd_comp = None, ids_comp = None,
styles = ['vg_major_linear', 'vd_major_linear', 'vg_major_log', 'vd_major_log']
):
if 'vg_major_linear' in styles:
visualizer.plot_linear_Id_vs_Vd_at_Vg(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
)
if 'vd_major_linear' in styles:
visualizer.plot_linear_Id_vs_Vg_at_Vd(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
)
if 'vg_major_log' in styles:
visualizer.plot_log_Id_vs_Vd_at_Vg(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
)
if 'vd_major_log' in styles:
visualizer.plot_log_Id_vs_Vg_at_Vd(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
)
def _build_optimizer(optim_method, optim_param):
if optim_method == 'AdaGrad':
optim = optimizer.AdagradOptimizer(**optim_param)
elif optim_method == 'SgdOptimizer':
optim = optimizer.SgdOptimizer(**optim_param)
elif optim_method == 'Adam':
optim = optimizer.AdamOptimizer(**optim_param)
else:
raise Exception(
            'Did you forget to implement {}?'.format(optim_method))
return optim
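# Illustrative end-to-end sketch (hypothetical, never executed on import):
# train an ACQVModel on a synthetic capacitance-voltage sweep. The model
# name, network sizes and the preprocessing parameters ('scale', 'vg_shift')
# are placeholders whose exact meaning is defined by the preproc module, so
# treat this as a shape-of-the-workflow example rather than a working recipe.
def _demo_acqv_workflow():
    voltages = np.linspace(-1.0, 1.0, 100)
    capas = 1.0 / (1.0 + np.exp(-5.0 * voltages))    # toy C-V curve
    preproc_param = {'scale': 1.0, 'vg_shift': 0.0}  # placeholder values
    model = ACQVModel('demo_acqv', input_dim=1, output_dim=1)
    model.add_data('train', [voltages, capas], preproc_param)
    model.build_nets(hidden_dims=[16, 16], batch_size=10)
    model.train_with_eval(num_epoch=10, report_interval=0)
    return model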
| mit |
burgerdev/googleclusterdata | extract_mean_cpu_usage_from_zips.py | 1 | 4700 |
import time
import logging
import glob
import gzip
import csv
import h5py
import numpy as np
import pandas
from clusterdata.schema import get_valid_tables
from fill_tables import format_seconds
logger = logging.getLogger(__name__)
def add_means_to_array(arr, out):
n = len(out)
first_ind = int(np.floor(arr[:, 1].min()))
first_ind = max(0, first_ind)
last_ind = int(np.floor(arr[:, 2].max()))
last_ind = min(n, last_ind)
x = np.zeros((n,))
for i in range(first_ind, last_ind):
a = np.maximum(arr[:, 1], i)
b = np.minimum(arr[:, 2], i+1)
w = b - a
w = np.minimum(w, 1)
w = np.maximum(w, 0)
x[i] = (arr[:, 0] * w).sum()
out[:] += x
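# Illustrative check, never called by the pipeline: one record with value 10
# active from t=0.25 to t=2.5 (already rescaled to bin units) contributes to
# each unit-width bin in proportion to its overlap with that bin. Note that
# the final, partially covered bin (2.0-2.5) receives nothing because
# `last_ind` is floored above; whether that is intended is worth checking.
def _demo_overlap_weights():
    arr = np.array([[10.0, 0.25, 2.5]])  # columns: value, start_bin, end_bin
    out = np.zeros(4)
    add_means_to_array(arr, out)
    return out                           # expected: [7.5, 10.0, 0.0, 0.0]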
def process_csv(csv_file, start, end, resolution, out):
df = pandas.read_csv(csv_file, header=None)
x = df[[5, 0, 1]].as_matrix()
x[:, 1] = np.maximum(x[:, 1], start)
x[:, 2] = np.minimum(x[:, 2], end)
x[:, 1] -= start
x[:, 2] -= start
x[:, 1:] /= float(resolution)
add_means_to_array(x, out)
def run(args):
times = np.arange(args.start, args.end, args.resolution)
output = np.zeros((len(times), 2))
output[:, 0] = times
with h5py.File(args.output, 'w') as h5f:
h5ds = h5f.require_dataset("cpu_usage",
shape=output.shape, dtype=np.float64)
h5ds[:] = output
already_processed = set()
if args.import_file is not None:
with open(args.import_file, 'r') as f:
l = [line.strip() for line in f]
already_processed = set(l)
export_file = None
if args.export_file is not None:
export_file = open(args.export_file, 'a')
try:
        table = [t for t in get_valid_tables() if t.name == "task_usage"][0]
start_time = time.time()
g = table.get_glob()
filenames = sorted(glob.glob(g))
num_filenames = len(filenames)
actually_processed = 0.0
for i, filename in enumerate(filenames):
if filename in already_processed:
logger.info("skipping file '{}'".format(filename))
continue
logger.info("processing file '{}'".format(filename))
with h5py.File(args.output, 'a') as h5f:
h5ds = h5f.require_dataset("cpu_usage",
shape=output.shape,
dtype=np.float64)
output[:] = h5ds[:]
with gzip.GzipFile(filename, 'r') as f:
process_csv(f,
args.start, args.end, args.resolution,
output[:, 1])
h5ds[:] = output[:]
if export_file is not None:
export_file.write("{}\n".format(filename))
actually_processed += 1
total_elapsed_time = time.time() - start_time
mean_elapsed_time = total_elapsed_time / actually_processed
time_to_go = (num_filenames-i-1) * mean_elapsed_time
logger.info("Estimated time remaining for this table: "
"{}".format(format_seconds(time_to_go)))
finally:
if export_file is not None:
export_file.close()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("output", help="write HDF5 output here")
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="print progress indicators")
parser.add_argument("-r", "--resolution", action="store", type=int,
default=int(1e6*60),
help="resolution of host load result in microseconds")
parser.add_argument("--start", action="store", type=int,
default=600000000,
help="start time in microseconds")
parser.add_argument("--end", action="store", type=int,
default=2506200000001,
help="end time in microseconds")
parser.add_argument("-e", "--export-file", action="store",
default=None,
help="save information to this file for a future run")
parser.add_argument("-i", "--import-file", action="store",
default=None,
help="use this file to resume a former run")
args = parser.parse_args()
if args.export_file is None:
args.export_file = args.import_file
if args.verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARN)
logging.basicConfig()
run(args)
| mit |
harisbal/pandas | pandas/tests/indexes/timedeltas/test_timedelta_range.py | 1 | 3188 | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import timedelta_range, to_timedelta
from pandas.tseries.offsets import Day, Second
class TestTimedeltas(object):
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
tm.assert_frame_equal(expected, result)
with pytest.raises(ValueError):
# GH 22274: CalendarDay is a relative time measurement
timedelta_range('1day', freq='CD', periods=2)
@pytest.mark.parametrize('periods, freq', [
(3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')])
def test_linspace_behavior(self, periods, freq):
# GH 20976
result = timedelta_range(start='0 days', end='4 days', periods=periods)
expected = timedelta_range(start='0 days', end='4 days', freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the four parameters: start, end, periods, and freq, '
'exactly three must be specified')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(start='0 days')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(end='5 days')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
timedelta_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(start='0 days', end='5 days', periods=10, freq='H')
| bsd-3-clause |
KarrLab/kinetic_datanator | tests/data_source/rna_halflife/test_doi_10_1093_nar_gks1019.py | 1 | 2208 | import unittest
from datanator.data_source.rna_halflife import doi_10_1093_nar_gks1019
import tempfile
import shutil
import json
import os
from datanator_query_python.config import config
import pandas as pd
class TestProteinAggregate(unittest.TestCase):
@classmethod
def setUpClass(cls):
des_db = 'test'
src_db = 'datanator'
cls.protein_col = 'uniprot'
cls.rna_col = 'rna_halflife'
conf = config.TestConfig()
username = conf.USERNAME
password = conf.PASSWORD
MongoDB = conf.SERVER
cls.src = doi_10_1093_nar_gks1019.Halflife(server=MongoDB, src_db=src_db,
protein_col=cls.protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=20,
des_db=des_db, rna_col=cls.rna_col)
@classmethod
def tearDownClass(cls):
cls.src.uniprot_collection_manager.db_obj.drop_collection(cls.protein_col)
cls.src.db_obj.drop_collection(cls.rna_col)
cls.src.uniprot_collection_manager.client.close()
cls.src.client.close()
cls.src.uniprot_query_manager.client.close()
@unittest.skip('avoid downloading')
def test_fill_uniprot(self):
url_0 = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA'
df_0 = self.src.fill_uniprot(url_0, 'Supplementary Table 1')
self.assertEqual(df_0.iloc[0]['ordered_locus_name'], 'Rv0002')
def test_fill_rna_halflife(self):
d = {'half_life': [32.3, 12.2, 13.2], 'r_squared': [0.9, 0.7, 0.8],
'ordered_locus_name': ['Rv0002', 'something', 'this']}
df_0 = pd.DataFrame(d)
self.src.fill_rna_halflife(df_0, ['aaa', 102]) | mit |
hill-a/stable-baselines | stable_baselines/bench/monitor.py | 1 | 7798 | __all__ = ['Monitor', 'get_monitor_files', 'load_results']
import csv
import json
import os
import time
from glob import glob
from typing import Tuple, Dict, Any, List, Optional
import gym
import pandas
import numpy as np
class Monitor(gym.Wrapper):
"""
A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.
:param env: (gym.Env) The environment
:param filename: (Optional[str]) the location to save a log file, can be None for no log
:param allow_early_resets: (bool) allows the reset of the environment before it is done
:param reset_keywords: (tuple) extra keywords for the reset call, if extra parameters are needed at reset
:param info_keywords: (tuple) extra information to log, from the information return of environment.step
"""
EXT = "monitor.csv"
file_handler = None
def __init__(self,
env: gym.Env,
filename: Optional[str],
allow_early_resets: bool = True,
reset_keywords=(),
info_keywords=()):
super(Monitor, self).__init__(env=env)
self.t_start = time.time()
if filename is None:
self.file_handler = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.file_handler = open(filename, "wt")
self.file_handler.write('#%s\n' % json.dumps({"t_start": self.t_start, 'env_id': env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.file_handler,
fieldnames=('r', 'l', 't') + reset_keywords + info_keywords)
self.logger.writeheader()
self.file_handler.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs) -> np.ndarray:
"""
Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True
:param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords
:return: (np.ndarray) the first observation of the environment
"""
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, "
"wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for key in self.reset_keywords:
value = kwargs.get(key)
if value is None:
raise ValueError('Expected you to pass kwarg {} into reset'.format(key))
self.current_reset_info[key] = value
return self.env.reset(**kwargs)
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict[Any, Any]]:
"""
Step the environment with the given action
:param action: (np.ndarray) the action
:return: (Tuple[np.ndarray, float, bool, Dict[Any, Any]]) observation, reward, done, information
"""
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
observation, reward, done, info = self.env.step(action)
self.rewards.append(reward)
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
eplen = len(self.rewards)
ep_info = {"r": round(ep_rew, 6), "l": eplen, "t": round(time.time() - self.t_start, 6)}
for key in self.info_keywords:
ep_info[key] = info[key]
self.episode_rewards.append(ep_rew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.t_start)
ep_info.update(self.current_reset_info)
if self.logger:
self.logger.writerow(ep_info)
self.file_handler.flush()
info['episode'] = ep_info
self.total_steps += 1
return observation, reward, done, info
def close(self):
"""
Closes the environment
"""
super(Monitor, self).close()
if self.file_handler is not None:
self.file_handler.close()
def get_total_steps(self) -> int:
"""
Returns the total number of timesteps
:return: (int)
"""
return self.total_steps
def get_episode_rewards(self) -> List[float]:
"""
Returns the rewards of all the episodes
:return: ([float])
"""
return self.episode_rewards
def get_episode_lengths(self) -> List[int]:
"""
Returns the number of timesteps of all the episodes
:return: ([int])
"""
return self.episode_lengths
def get_episode_times(self) -> List[float]:
"""
Returns the runtime in seconds of all the episodes
:return: ([float])
"""
return self.episode_times
class LoadMonitorResultsError(Exception):
"""
Raised when loading the monitor log fails.
"""
pass
def get_monitor_files(path: str) -> List[str]:
"""
get all the monitor files in the given path
:param path: (str) the logging folder
:return: ([str]) the log files
"""
return glob(os.path.join(path, "*" + Monitor.EXT))
def load_results(path: str) -> pandas.DataFrame:
"""
Load all Monitor logs from a given directory path matching ``*monitor.csv`` and ``*monitor.json``
:param path: (str) the directory path containing the log file(s)
:return: (pandas.DataFrame) the logged data
"""
# get both csv and (old) json files
monitor_files = (glob(os.path.join(path, "*monitor.json")) + get_monitor_files(path))
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, path))
data_frames = []
headers = []
for file_name in monitor_files:
with open(file_name, 'rt') as file_handler:
if file_name.endswith('csv'):
first_line = file_handler.readline()
assert first_line[0] == '#'
header = json.loads(first_line[1:])
data_frame = pandas.read_csv(file_handler, index_col=None)
headers.append(header)
elif file_name.endswith('json'): # Deprecated json format
episodes = []
lines = file_handler.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
data_frame = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
data_frame['t'] += header['t_start']
data_frames.append(data_frame)
data_frame = pandas.concat(data_frames)
data_frame.sort_values('t', inplace=True)
data_frame.reset_index(inplace=True)
data_frame['t'] -= min(header['t_start'] for header in headers)
# data_frame.headers = headers # HACK to preserve backwards compatibility
return data_frame
| mit |
treycausey/scikit-learn | examples/plot_rfe_with_cross_validation.py | 1 | 1366 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import pylab as pl
pl.figure()
pl.xlabel("Number of features selected")
pl.ylabel("Cross validation score (nb of correct classifications)")
pl.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
pl.show()
| bsd-3-clause |
anurag313/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
slinderman/theano_pyglm | pyglm/plotting/plotting.py | 1 | 5031 | """
Build a class of plotting classes. These classes should be specific
to particular components or state variables. For example, we might
have a plotting class for the network. The classes should be initialized
with a model to determine how they should plot the results.
The classes should be able to:
- plot either a single sample or the mean of a sequence of samples
along with error bars.
- take in an axis (or a figure) or create a new figure if not specified
- take in a color or colormap for plotting
"""
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.colormaps import gradient_cmap
from pyglm.utils.theano_func_wrapper import seval
rwb_cmap = gradient_cmap([[1,0,0],
[1,1,1],
[0,0,0]])
class PlotProvider(object):
"""
Abstract class for plotting a sample or a sequence of samples
"""
def __init__(self, population):
"""
Check that the model satisfies whatever criteria are appropriate
for this model.
"""
self.population = population
def plot(self, sample, ax=None):
"""
Plot the sample or sequence of samples
"""
pass
class NetworkPlotProvider(PlotProvider):
"""
Class to plot the connectivity network
"""
def __init__(self, population):
super(NetworkPlotProvider, self).__init__(population)
# TODO: Check that the model has a network?
# All models should have a network
def plot(self, xs, ax=None, title=None, vmin=None, vmax=None, cmap=rwb_cmap):
# Ensure sample is a list
if not isinstance(xs, list):
xs = [xs]
# Get the weight matrix and adjacency matrix
wvars = self.population.network.weights.get_variables()
Ws = np.array([seval(self.population.network.weights.W,
wvars, x['net']['weights'])
for x in xs])
gvars = self.population.network.graph.get_variables()
As = np.array([seval(self.population.network.graph.A,
gvars, x['net']['graph'])
for x in xs])
# Compute the effective connectivity matrix
W_inf = np.mean(Ws*As, axis=0)
# Make sure bounds are set
if None in (vmax,vmin):
vmax = np.amax(np.abs(W_inf))
vmin = -vmax
# Create a figure if necessary
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
px_per_node = 10
im = ax.imshow(np.kron(W_inf,np.ones((px_per_node,px_per_node))),
vmin=vmin, vmax=vmax,
extent=[0,1,0,1],
interpolation='nearest',
cmap=cmap)
ax.set_title(title)
class LocationPlotProvider(PlotProvider):
"""
Plot the latent locations of the neurons
"""
def plot(self, xs, ax=None, name='location_provider', color='k'):
"""
Plot a histogram of the inferred locations for each neuron
"""
# Ensure sample is a list
if not isinstance(xs, list):
xs = [xs]
if name not in xs[0]['latent']:
return
# Get the locations
loccomp = self.population.latent.latentdict[name]
locprior = loccomp.location_prior
locvars = loccomp.get_variables()
Ls = np.array([seval(loccomp.Lmatrix,
locvars, x['latent'][name])
for x in xs])
[N_smpls, N, D] = Ls.shape
for n in range(N):
# plt.subplot(1,N,n+1, aspect=1.0)
# plt.title('N: %d' % n)
if N_smpls == 1:
if D == 1:
plt.plot([Ls[0,n,0], Ls[0,n,0]],
[0,2], color=color, lw=2)
elif D == 2:
ax.plot(Ls[0,n,1], Ls[0,n,0], 's',
color=color, markerfacecolor=color)
ax.text(Ls[0,n,1]+0.25, Ls[0,n,0]+0.25, '%d' % n,
color=color)
# Set the limits
ax.set_xlim((locprior.min0-0.5, locprior.max0+0.5))
ax.set_ylim((locprior.max1+0.5, locprior.min1-0.5))
else:
raise Exception("Only plotting locs of dim <= 2")
else:
# Plot a histogram of samples
if D == 1:
ax.hist(Ls[:,n,0], bins=20, normed=True, color=color)
elif D == 2:
ax.hist2d(Ls[:,n,1], Ls[:,n,0], bins=np.arange(-0.5,5), cmap='Reds', alpha=0.5, normed=True)
# Set the limits
ax.set_xlim((locprior.min0-0.5, locprior.max0+0.5))
ax.set_ylim((locprior.max1+0.5, locprior.min1-0.5))
# ax.colorbar()
else:
raise Exception("Only plotting locs of dim <= 2")
| mit |
scipy/scipy | scipy/signal/bsplines.py | 12 | 19509 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
Parameters
----------
Iin : array_like
input data set
lmbda : float, optional
        spline smoothing fall-off value, default is `5.0`.
Returns
-------
res : ndarray
        filtered input data
Examples
--------
    We can filter a multidimensional signal (e.g. a 2D image) using a cubic
    B-spline filter:
>>> from scipy.signal import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, lmbda=0.1)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = float_factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values
See Also
--------
cubic : A cubic B-spline.
quadratic : A quadratic B-spline.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
r"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values approximated by a zero-mean Gaussian
function.
Notes
-----
The B-spline basis function can be approximated well by a zero-mean
Gaussian function with standard-deviation equal to :math:`\sigma=(n+1)/12`
for large `n` :
.. math:: \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma})
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
.. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
Examples
--------
We can calculate B-Spline basis functions approximated by a gaussian
distribution:
>>> from scipy.signal import gauss_spline, bspline
>>> knots = np.array([-1.0, 0.0, -1.0])
>>> gauss_spline(knots, 3)
array([0.15418033, 0.6909883, 0.15418033]) # may vary
>>> bspline(knots, 3)
array([0.16666667, 0.66666667, 0.16666667]) # may vary
"""
x = asarray(x)
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Cubic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
quadratic : A quadratic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Quadratic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
cubic : A cubic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
See Also
--------
cspline1d_eval : Evaluate a cubic spline at the new set of points.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
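# Illustrative check, never called here: per the docstring above, mirror-
# symmetric convolution of the cubic-spline coefficients with the window
# [1, 4, 1] / 6 should recover the original samples. The sketch implements
# the one-sample mirror extension explicitly; the returned maximum error
# should be near zero, up to small edge effects from how the prefilter
# recursion is initialized.
def _demo_cspline1d_roundtrip(signal):
    import numpy as np
    x = np.asarray(signal, dtype=float)
    c = cspline1d(x)
    ext = np.concatenate(([c[1]], c, [c[-2]]))  # mirror the end coefficients
    rec = np.convolve(ext, np.array([1.0, 4.0, 1.0]) / 6.0, mode='valid')
    return np.max(np.abs(rec - x))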
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
See Also
--------
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
Notes
-----
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a cubic spline at the new set of points.
    `dx` is the old sample-spacing while `x0` was the old origin. In
    other words, the old sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Parameters
----------
cj : ndarray
        cubic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
        Evaluated cubic spline points.
See Also
--------
cspline1d : Compute cubic spline coefficients for rank-1 array.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
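# Illustrative sketch (not part of scipy): with lamb=0 the spline interpolates
# the data, so evaluating at the original knot positions oldx = x0 + j*dx
# returns the original samples, while off-knot points give smooth interpolated
# values.  Signal, spacing and half-sample shift are assumptions for the demo.
def _demo_cspline1d_eval_roundtrip():
    import numpy as np
    sig = np.cos(np.linspace(0, 3 * np.pi, 40))
    cj = cspline1d(sig)
    knots = np.arange(len(sig), dtype=float)  # dx=1.0, x0=0 (the defaults)
    assert np.allclose(cspline1d_eval(cj, knots), sig)  # exact at the knots
    return cspline1d_eval(cj, knots[:-1] + 0.5)  # interpolate between samples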
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
Parameters
----------
cj : ndarray
Quadratic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
Evaluated quadratic spline points.
See Also
--------
qspline1d : Compute quadratic spline coefficients for rank-1 array.
Notes
-----
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
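# Illustrative sketch (not part of scipy): the same knot round-trip holds for
# the quadratic routines documented above; the fractional offset used for the
# resampled points is an arbitrary choice for the demo.
def _demo_qspline1d_eval_roundtrip():
    import numpy as np
    sig = np.cos(np.linspace(0, 3 * np.pi, 40))
    cj = qspline1d(sig)
    knots = np.arange(len(sig), dtype=float)
    assert np.allclose(qspline1d_eval(cj, knots), sig)
    return qspline1d_eval(cj, knots[:-1] + 0.25)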
| bsd-3-clause |
zihua/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 20 | 51086 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
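# Illustrative sketch (not part of scikit-learn): fit_binary above treats the
# i'th class as the positive class, i.e. each one-vs-all sub-problem sees a
# +1/-1 target vector as built in _prepare_fit_binary.  The toy labels below
# are made up; the helper simply restates that remapping for clarity.
def _demo_ova_targets():
    y = np.array([0, 1, 2, 1, 0])
    classes = np.unique(y)
    # one +1/-1 target vector per class, one per binary sub-problem
    return [np.where(y == c, 1.0, -1.0) for c in classes]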
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
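# Illustrative sketch (not part of scikit-learn): the partial_fit docstring
# above requires `classes` on the first call and allows omitting it afterwards.
# The mini-batch loop below is an assumed out-of-core usage pattern; it uses
# the concrete SGDClassifier defined further down in this module, and the
# random data and batch size are inventions for the demo.
def _demo_partial_fit_out_of_core():
    rng = np.random.RandomState(0)
    X_all = rng.randn(300, 4)
    y_all = rng.randint(0, 3, size=300)
    clf = SGDClassifier(loss="log")
    classes = np.unique(y_all)
    for start in range(0, X_all.shape[0], 50):  # stream the data in batches
        batch = slice(start, start + 50)
        clf.partial_fit(X_all[batch], y_all[batch], classes=classes)
    return clf.predict(X_all[:5])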
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
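# Illustrative sketch (not part of scikit-learn): the predict_proba docstring
# above gives the binary modified_huber mapping
#     P(y = +1 | x) = (clip(decision_function(x), -1, 1) + 1) / 2.
# The helper below only restates that documented formula on raw decision
# scores; it is not a library function.
def _demo_modified_huber_proba(scores):
    scores = np.asarray(scores, dtype=np.float64)
    p_pos = (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0
    return np.column_stack([1.0 - p_pos, p_pos])  # columns: [P(neg), P(pos)]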
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
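# Illustrative sketch (not part of scikit-learn): the classifier and regressor
# docstrings above list three learning-rate schedules.  The helper below just
# evaluates those formulas; `t0` in the 'optimal' schedule is normally chosen
# internally by a heuristic due to Leon Bottou, so here it is an explicit
# caller-supplied argument rather than the library's value.
def _demo_learning_rate(schedule, t, eta0=0.01, alpha=0.0001, power_t=0.25, t0=1.0):
    if schedule == "constant":
        return eta0
    elif schedule == "optimal":
        return 1.0 / (alpha * (t + t0))
    elif schedule == "invscaling":
        return eta0 / pow(t, power_t)
    raise ValueError("unknown schedule %r" % schedule)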
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 21 | 6697 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |