repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
victorbergelin/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because each dimension contains only a few points and linear regression
fits a straight line that follows those points as closely as it can,
noise in the observations causes large variance, as shown in the first
plot. Every line's slope can vary quite a bit from one prediction to the
next because of the noise induced in the observations.
Ridge regression minimizes a penalised version of the least-squares
objective. The penalty `shrinks` the values of the regression
coefficients.
Despite the few data points in each dimension, the slope of the
prediction is much more stable and the variance in the line itself is
greatly reduced, in comparison to that of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
dialounke/pylayers | pylayers/antprop/tests/test_SSHFunc.py | 2 | 1730 | import numpy as np
import matplotlib.pyplot as plt
from pylayers.antprop.antssh import *
from pylayers.antprop.antenna import *
from sklearn.linear_model import Lasso
from scipy.optimize import fmin,minimize
plt.ion()
# reference antenna gain
A = Antenna('hplanesectoralhorn',fGHz=26)
A.eval()
# G is real
G = A.G[:,:,0]
nth = G.shape[0]
nph = G.shape[1]
L = 45
theta = np.linspace(0,np.pi,nth)
phi = np.linspace(0,2*np.pi,nph)
# Spherical Harmonics matrix
Y,idx = SSHFunc(L,theta,phi)
Ypinv = np.linalg.pinv(Y)
cg = np.dot(G.ravel(),Ypinv)
Gr = np.real(np.dot(cg,Y).reshape(nth,nph))
#plt.subplot(121)
#plt.imshow(np.abs(Y.T),cmap='jet')
#plt.axis('auto')
#plt.subplot(122)
#plt.imshow(np.angle(Y.T),cmap='jet')
#plt.axis('auto')
#
Gt = G[:,[0,45,90,135]]
Y2,idx = SSHFunc(L,theta,phi[[0,45,90,135]])
def fun(x,Y2=Y2,gt=Gt.ravel(),alpha=0.1):
c = x[::2]+1j*x[1::2]
gr = np.real(np.dot(c,Y2))
L2 = np.sqrt(np.dot(gr-gt,gr-gt))
L1 = np.sum(np.abs(x))
M = L2+alpha*L1
return(M)
Y2pinv = np.linalg.pinv(Y2)
cg2 = np.dot(Gt.ravel(),Y2pinv)
xg2 = np.array(zip(np.real(cg2),np.imag(cg2))).ravel()
xg = np.array(zip(np.real(cg),np.imag(cg))).ravel()
print "Start optimization"
ropt = minimize(fun,xg2,method='CG')
xopt = fmin(fun,xg2)
xopt = ropt.x
copt = xopt[::2]+1j*xopt[1::2]
Gr2 = np.real(np.dot(copt,Y).reshape(90,181))
plt.subplot()
plt.subplot(311)
plt.imshow(10*np.log10(np.abs(G)),cmap='jet',vmin=-40)
plt.colorbar()
plt.subplot(312)
plt.imshow(10*np.log10(np.abs(Gr)),cmap='jet',vmin=-40)
plt.colorbar()
plt.subplot(313)
plt.imshow(10*np.log10(np.abs(Gr2)),cmap='jet',vmin=-40,vmax=18)
plt.colorbar()
plt.show()
#alpha = 0.1
#lasso = Lasso(alpha=alpha)
#h=lasso.fit(Y2.T,Gt.ravel()).predict(Y2.T)
| mit |
XueqingLin/tensorflow | tensorflow/examples/skflow/mnist_rnn.py | 14 | 2812 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example builds an RNN network for the MNIST data.
Borrowed structure from here: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/recurrent_network.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
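# --- Editor's sketch (illustrative, not part of the original example) -------
# rnn_model() below turns each flattened 784-pixel image into a sequence of
# n_steps = 28 row vectors of n_input = 28 pixels.  A minimal NumPy version of
# the same reshaping (the batch size of 128 is an arbitrary assumption):
import numpy as _demo_np
_demo_batch = _demo_np.zeros((128, 784))                   # batch of flattened images
_demo_seq = _demo_batch.reshape(-1, 28, 28)                # (batch_size, n_steps, n_input)
_demo_steps = [_demo_seq[:, _k, :] for _k in range(28)]    # n_steps * (batch_size, n_input)
assert len(_demo_steps) == 28 and _demo_steps[0].shape == (128, 28)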
# Parameters
learning_rate = 0.1
training_steps = 3000
batch_size = 128
# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
# It's useful to scale to ensure Stochastic Gradient Descent will do the right thing
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
def rnn_model(X, y):
X = tf.reshape(X, [-1, n_steps, n_input]) # (batch_size, n_steps, n_input)
# # permute n_steps and batch_size
X = tf.transpose(X, [1, 0, 2])
# # Reshape to prepare input to hidden activation
X = tf.reshape(X, [-1, n_input]) # (n_steps*batch_size, n_input)
# # Split data because rnn cell needs a list of inputs for the RNN inner loop
X = tf.split(0, n_steps, X) # n_steps * (batch_size, n_input)
# Define a GRU cell with tensorflow
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
# Get lstm cell output
_, encoding = tf.nn.rnn(lstm_cell, X, dtype=tf.float32)
return learn.models.logistic_regression(encoding, y)
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=n_classes,
batch_size=batch_size,
steps=training_steps,
learning_rate=learning_rate)
classifier.fit(X_train, y_train, logdir="/tmp/mnist_rnn")
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
sniemi/EuclidVisibleInstrument | analysis/analyseGaiaBAMCosmicRayData.py | 1 | 19247 | # -*- coding: utf-8 -*-
"""
Cosmic Rays
===========
This script derives simple cosmic ray statistics from Gaia BAM data.
Note that the Gaia BAM data are binned 4 x 1 leading to pixel geometries
that are 120 x 10 microns. One can derive a statistical correction to
take into account the binning. Further corrections are needed to scale
e.g. to VIS CCD273 pixels, which are 12 x 12 microns. Thus, the results
will necessarily contain some uncertainties.
:requires: pyfits (tested with 3.3)
:requires: numpy (tested with 1.9.2)
:requires: scipy (tested with 0.15.1)
:requires: matplotlib (tested with 1.4.3)
:requires: sklearn (scikit-learn, tested with 0.15.2)
:requires: statsmodels (tested with 0.6.1)
:requires: vissim-python
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 1.0
"""
import matplotlib
matplotlib.use('pdf')
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['image.interpolation'] = 'none'
import matplotlib.pyplot as plt
import pyfits as pf
import numpy as np
import scipy as sp
import scipy.interpolate as interpolate
from sklearn.neighbors import KernelDensity
from statsmodels.distributions.empirical_distribution import ECDF
from scipy import ndimage
import glob as g
from support import logger as lg
def findFiles(log, fileID='data/*/*.fits'):
"""
Find all files that match the ID.
:param log: a logger instance
:type log: instance
:param fileID: identification with a wild card to find the files
:type fileID: str
:return: a list containing all files matching the wild card.
:rtype: lst
"""
files = g.glob(fileID)
msg = 'Found %i files' % len(files)
print msg
log.info(msg)
return files
def readData(log, files):
"""
Read data and gather information from the header from all files given.
:param log: a logger instance
:type log: instance
:param files: a list of FITS file names
:type files: lst
:return: NumPy array containing pixel data and a list containing dictionaries that hold header information
:rtype: ndarray, lst
"""
info = []
data = []
for f in files:
fh = pf.open(f)
hdr = fh[0].header
exptime = float(hdr['EXP_TIME'].split()[0])
tditime = float(hdr['TRO_TIME'].split()[0])
gain = float(hdr['CONVGAIN'].split()[0])
pixels = hdr['PIX_GEOM'].split()[::2]
binningx, binningy = hdr['BINNING'].strip().split('x')
binningx = int(binningx)
binningy = int(binningy)
info.append(dict(exptime=exptime, tditime=tditime, gain=gain, pixels=pixels,
binningx=binningx, binningy=binningy))
data.append(fh[0].data.astype(np.float64))
fh.close()
data = np.asarray(data)
log.info('files read')
return data, info
def preProcessData(log, data, info):
"""
Removes the first line, transposes the array, and subtracts the median (derived ADC offset).
:param log: a logger instance
:type log: instance
:param data: a list of pixel data arrays to process
:type data: lst
:param info: a list of dictionaries that contain the header information
:type info: lst
:return: list of pixel data arrays
:rtype: lst
"""
out = []
for d, i in zip(data, info):
#remove first line
d = d[1:, :].T
#remove median, this is assumed to correspond to the average ADC offset
med = np.median(d)
msg = 'Subtracting %.1f from the data' % med
print msg
log.info(msg)
d -= med
#convert to electrons
msg = 'Multiplying with gain of %.3f' % i['gain']
print msg
log.info(msg)
d *= i['gain']
out.append(d)
return out
def _drawFromCumulativeDistributionFunction(cpdf, x, number):
"""
Draw a number of random x values from a cumulative distribution function.
:param cpdf: cumulative distribution function
:type cpdf: numpy array
:param x: values of the abscissa
:type x: numpy array
:param number: number of draws
:type number: int
:return: randomly drawn x value
:rtype: ndarray
"""
luck = np.random.random(number)
tck = interpolate.splrep(cpdf, x)
out = interpolate.splev(luck, tck)
return out
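# --- Editor's sketch (illustrative, not part of the original analysis) ------
# Minimal use of the inverse-CDF draw above: for a uniform distribution on
# [0, 10] the CDF is x / 10, so the returned draws are simply uniform on [0, 10].
_demo_x = np.linspace(0., 10., 101)
_demo_cdf = _demo_x / 10.
_demo_draws = _drawFromCumulativeDistributionFunction(_demo_cdf, _demo_x, 5)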
def _findCosmicRays(log, array, info, output, sigma=3.5, correctLengths=True):
"""
Find all cosmic rays from data. A simple threshold above the noise level is used.
All pixels above the given sigma limit are assumed to be cosmic ray events.
:param log: a logger instance
:type log: instance
:param array: pixel data array
:type array: ndarray
:param info: dictionary containing the header information of the pixel data
:type info: dict
:param output: name of the output file
:type output: str
:param sigma: the threshold (std) above which the cosmic rays are identified
:type sigma: float
:param correctLengths: whether or not to correct for the binning.
:type correctLengths: bool
:return: a list containing CR labels, tracks, energies, and fluence
:rtype: lst
"""
#find all pixels above a threshold
thresholded = array > array.std()*sigma
CRdata = array[thresholded]
#label the pixels
labels, numb = ndimage.label(thresholded)
print 'Found %i cosmic rays' % numb
#find locations
locations = ndimage.measurements.find_objects(labels)
if correctLengths:
print 'Trying to correct for the track lengths, loading a track length PDF'
#read in the PDF of lengths
data = np.loadtxt('trackLengthPDF.txt', delimiter=' ')
pix = data[:, 0]
PDF = data[:, 1]
#convert to CDF
dx = pix[1] - pix[0] #assume equal size steps
cdf = np.cumsum(PDF*dx)
#count the track lengths and energies
tracks = []
energy = []
for loc in locations:
pixels = array[loc]
num = pixels.size
#TODO: check that this is correct, not sure...
#correct for the fact that the data heavily binned
if correctLengths:
if num == 1:
#if single pixel event, then make a correction to the track length
tmp = _drawFromCumulativeDistributionFunction(cdf, pix, num)
num = tmp[0] / info['binningx']
else:
#multiple pixels, need to know direction
x, y = pixels.shape #we transposed the array earlier when loading data
if x > 1:
#need to draw a correction for each pixel covered
tmp = _drawFromCumulativeDistributionFunction(cdf, pix, x)
x = np.sum(tmp)
#total number of pixels covered
num = x + y
#store information
tracks.append(num)
energy.append(pixels.sum())
#convert to NumPy array
tracks = np.asarray(tracks)
energy = np.asarray(energy)
#calculate statistics
sm = float(tracks.sum())
rate = sm / (info['exptime'] + info['tditime']) /array.size #not sure if it should be half of the TDI time
fluence = rate / (float(info['pixels'][0])*info['binningx']*float(info['pixels'][1])*info['binningy']) / 1e-8 / tracks.mean()
if correctLengths:
print 'The longest track covers %i unbinned pixels' % tracks.max()
print 'Average track length is %.1f unbinned pixels' % tracks.mean()
print 'In total, %i unbinned pixels were affected, i.e. %.1f per cent' % (sm, 100.*sm/array.size)
print 'The rate of cosmic rays is %.2e CR / second / unbinned pixel' % rate
print 'The fluence of cosmic rays is %.1f events / second / cm**2' % fluence
print 'Most energetic cosmic ray deposited %.1f photoelectrons' % energy.max()
print 'Average track energy is %.1f photoelectrons' % energy.mean()
else:
print 'The longest track covers %i binned pixels' % tracks.max()
print 'Average track length is %.1f binned pixels' % tracks.mean()
print 'In total, %i binned pixels were affected, i.e. %.1f per cent' % (sm, 100.*sm/array.size)
print 'The rate of cosmic rays is %.2e CR / second / binned pixel' % rate
print 'The fluence of cosmic rays is %.1f events / second / cm**2' % fluence
print 'Most energetic cosmic ray deposited %.1f photoelectrons' % energy.max()
print 'Average track energy is %.1f photoelectrons' % energy.mean()
#plot simple histogram of the pixel values
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.imshow(array, cmap=plt.cm.gray, interpolation='none', vmin=0, vmax=500)
ax1.axis('off')
ax2.set_title('Pixel Values')
ax2.hist(array.ravel(), bins=np.linspace(0, 500., 100), normed=True)
ax2.set_xlabel('Energy [e$^{-}$]')
plt.savefig(output+'histogram.png')
plt.close()
#threshold image
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 6))
ax1.set_title('Data')
ax2.set_title('Found CRs')
ax1.imshow(array, cmap=plt.cm.gray, interpolation='none', vmin=0, vmax=2000)
ax2.imshow(labels, cmap=plt.cm.jet, interpolation='none', vmin=1)
ax1.axis('off')
ax2.axis('off')
plt.savefig(output+'thresholded.png')
plt.close()
#energy and track lengths
plt.title(output.replace('_', '\_')) #needed for LaTeX
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.hist(energy, bins=20, normed=True)
ax2.hist(tracks, bins=20, normed=True)
ax1.set_xlabel('Energy [e$^{-}$]')
ax1.set_ylabel('PDF')
ax2.set_xlabel('Track Lengths [pix]')
plt.savefig(output+'CRs.png')
plt.close()
return labels, tracks, energy, fluence
def analyseData(log, files, data, info):
"""
Analyse all BAM data held in files.
:param log: a logger instance
:type log: instance
:param files: a list of file names
:type files: lst
:param data: a list of pixel data
:type data: lst
:param info: a list of metadata dictionaries
:type info: dict
:return: None
"""
allD = []
for f, d, i in zip(files, data, info):
msg = 'Processing: %s' % f
out = f.split('/')[-1].replace('.fits', '')
print msg
log.info(msg)
labels, tracks, energy, fluence = _findCosmicRays(log, d, i, out)
allD.append([d, labels, tracks, energy, fluence])
#pull out the information from the individual files and join to a single array
tracks = np.concatenate(np.asarray([x[2] for x in allD]))
energies = np.concatenate(np.asarray([x[3] for x in allD]))
fluences = np.asarray([x[4] for x in allD])
#scale the track lengths to VIS 12 micron square pixels, assumes that the tracks
#are unbinned lengths
tracks *= ((float(info[0]['pixels'][0]) * float(info[0]['pixels'][1])) / (12.*12.))
print '\n\n\nCR fluences in events / cm**2 / second (min, max, average, std):'
print fluences.min(), fluences.max(), fluences.mean(), fluences.std()
#take a log10, better for visualisation and comparison against Stardust modelling
tracks = np.log10(tracks)
energies = np.log10(energies)
#histogram bins
esample = np.linspace(0., 7, 30)
tsample = np.linspace(0., 3.5, 20)
#KDE for energy
d2d = energies[:, np.newaxis]
x_gride = np.linspace(0.0, esample.max(), 500)
kde_skl = KernelDensity(kernel='gaussian', bandwidth=0.1)
kde_skl.fit(d2d)
log_pdfe = kde_skl.score_samples(x_gride[:, np.newaxis])
epdf = np.exp(log_pdfe)
np.savetxt('CRenergyPDF.txt', np.vstack([x_gride, epdf]).T)
#KDE for track lengts
d2d = tracks[:, np.newaxis]
x_gridt = np.linspace(0.0, tsample.max(), 500)
kde_skl = KernelDensity(kernel='gaussian', bandwidth=0.05)
kde_skl.fit(d2d)
log_pdft = kde_skl.score_samples(x_gridt[:, np.newaxis])
tpdf = np.exp(log_pdft)
np.savetxt('CRtrackPDF.txt', np.vstack([x_gridt, tpdf]).T)
#energy and track lengths
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.hist(energies, bins=esample, normed=True, alpha=0.2)
ax1.plot(x_gride, epdf, lw=3, c='r')
ax2.hist(tracks, bins=tsample, normed=True, alpha=0.2)
ax2.plot(x_gridt, tpdf, lw=3, c='r')
ax1.set_xlabel('$\log_{10}(\Sigma$Energy [e$^{-}$]$)$')
ax1.set_xlim(2.5, 7.)
ax1.set_ylabel('PDF')
ax2.set_xlabel('$\log_{10}($Track Lengths [pix]$)$')
plt.savefig('CRPDFs.png')
plt.close()
def generateBAMdatagridImage():
"""
Generates an example plot showing the Gaia BAM detector geometry
and binning used. A simple illustration.
:return: None
"""
#grid
x = np.linspace(0, 30*10, 11) #30 micron pixels
y = np.linspace(0, 10*10, 11) #10 micron pixels
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
plt.title('Gaia BAM Data: CR Tracks')
for v in y:
ax.axhline(y=v)
for v in x:
ax.axvline(x=v)
#binning area 1
plt.fill_between([0, 120], y1=[10, 10], y2=[20, 20], facecolor='r', alpha=0.1)
plt.text(60, 14, 'Binned Pixel N+1', horizontalalignment='center', verticalalignment='center')
plt.fill_between([0, 120], y1=[20, 20], y2=[30, 30], facecolor='g', alpha=0.1)
plt.text(60, 24, 'Binned Pixel N+2', horizontalalignment='center', verticalalignment='center')
plt.fill_between([0, 120], y1=[30, 30], y2=[40, 40], facecolor='m', alpha=0.1)
plt.text(60, 34, 'Binned Pixel N+3', horizontalalignment='center', verticalalignment='center')
#CR examples
plt.fill_between([120, 240], y1=[70, 70], y2=[80, 80], facecolor='k', alpha=0.1)
plt.plot([120, 240], [70, 80], 'k:', lw=1.)
plt.text(230, 73, '4 Pix', horizontalalignment='center', verticalalignment='center')
plt.fill_between([120, 240], y1=[60, 60], y2=[70, 70], facecolor='k', alpha=0.1)
plt.plot([120, 190], [60, 70], 'k:', lw=1.)
plt.text(230, 63, '3 Pix', horizontalalignment='center', verticalalignment='center')
plt.fill_between([120, 240], y1=[50, 50], y2=[60, 60], facecolor='k', alpha=0.1)
plt.plot([120, 170], [50, 60], 'k:', lw=1.)
plt.text(230, 53, '2 Pix', horizontalalignment='center', verticalalignment='center')
plt.fill_between([120, 240], y1=[40, 40], y2=[50, 50], facecolor='k', alpha=0.1)
plt.plot([120, 140], [40, 50], 'k:', lw=1.)
plt.text(230, 43, '1 Pix', horizontalalignment='center', verticalalignment='center')
#possible CRs
plt.fill_between([120, 240], y1=[20, 20], y2=[30, 30], facecolor='k', alpha=0.1)
for xval in np.linspace(-60, 60, 31):
plt.plot([180, 180+xval], [20, 30], 'k:', lw=0.5)
for yval in np.linspace(20, 30, 11):
plt.plot([180, 120], [20, yval], 'k:', lw=0.5)
plt.plot([180, 240], [20, yval], 'k:', lw=0.5)
ax.set_xticks(x)
ax.set_yticks(y)
plt.xlabel('Physical X [microns]')
plt.ylabel('Physical Y [microns]')
plt.axis('scaled')
plt.xlim(0, 300)
plt.ylim(0, 100)
plt.savefig('BAMccdGrid.pdf')
plt.close()
def deriveCumulativeFunctionsforBinning(xbin=4, ybin=1, xsize=1, ysize=1, mc=100000, dx=0.1):
"""
Because the original BAM data are binned 4 x 1, we do not know the track lengths.
One can try to derive a statistical correction by randomizing the position and the
angle a cosmic ray may have arrived. This function derives a probability density
function for the track lengths by Monte Carloing over the random locations and
angles.
:param xbin: number of pixels binned in x-direction
:type xbin: int
:param ybin: number of pixels binned in y-direction
:type ybin: int
:param xsize: how many binned pixels to use in the derivation
:type xsize: int
:param ysize: how many binned pixels to use in the derivation
:type ysize: int
:param mc: number of random realisations to generate
:type mc: int
:param dx: size of the steps to adopt when deriving CDF
:type dx: float
:return: None
"""
#pixel sizes, unbinned
ys = ysize * ybin
xs = xsize * xbin
#random location, random angle
xloc = np.random.random(size=mc)*xbin
yloc = np.random.random(size=mc)*ybin
angle = np.deg2rad(np.random.random(size=mc)*90)
#maximum distance in y direction is either location or size - location
ymax = np.maximum(yloc, ys - yloc)
xmax = np.maximum(xloc, xs - xloc)
#x length is the minimum of travel distance or the distance from the edge, but no smaller than 1
xtravel = np.minimum(ymax / np.tan(angle), xmax)
xtravel = np.maximum(xtravel, 1)
#covering full pixels, but no more than xs
covering = np.minimum(np.ceil(xtravel), xs)
print 'Track lengths (mean, median, std):'
print covering.mean(), np.median(covering), covering.std()
#PDF for track lengths
d2d = covering[:, np.newaxis]
x_grid = np.linspace(0.0, xs+0.5, 1000)
kde_skl = KernelDensity(kernel='gaussian', bandwidth=0.4)
kde_skl.fit(d2d)
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
PDF = np.exp(log_pdf)
#save to file
np.savetxt('trackLengthPDF.txt', np.vstack([x_grid, PDF]).T)
#get CDF using cumulative sum of the PDF
dx = x_grid[1] - x_grid[0]
CD = np.cumsum(PDF*dx)
#derive empirical CDF and add unity stopping point
CDF = ECDF(covering)
CDF.y.put(-1, 1)
CDF.x.put(-1, xs+1)
CDFx = CDF.x
CDFy = CDF.y
#plot
bins = np.arange(0, xs+1)+0.5
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
plt.title('Probabilistic Track Lengths: Binning %i x %i' % (xbin, ybin))
plt.plot(x_grid, PDF, lw=2, c='r', label='PDF')
plt.plot(CDFx-0.5, CDFy, lw=1.5, c='m', alpha=0.7, label='CDF')
plt.plot(x_grid, CD, lw=1.4, c='r', ls='--')
plt.hist(covering, bins, normed=True, facecolor='green', alpha=0.35)
ax.set_xticks(bins+0.5)
plt.xlim(0.5, xs+0.5)
plt.ylim(0, 1.05)
plt.xlabel('Track Length')
plt.ylabel('PDF / CDF')
plt.legend(shadow=True, fancybox=True)
plt.savefig('TrackLengths.pdf')
plt.close()
def runAll(deriveCDF=True, examplePlot=True):
"""
Run all steps from finding suitable Gaia BAM files to analysing them.
:return: None
"""
log = lg.setUpLogger('analyse.log')
log.info('\n\nStarting to analyse')
if deriveCDF: deriveCumulativeFunctionsforBinning()
if examplePlot: generateBAMdatagridImage()
files = findFiles(log)
data, info = readData(log, files)
data = preProcessData(log, data, info)
analyseData(log, files, data, info)
if __name__ == '__main__':
runAll() | bsd-2-clause |
pombredanne/nTLP | examples/robot_simple_gr1c.py | 1 | 6962 | #!/usr/bin/env python
"""
The example is an extension of robot_discrete_simple.py by including
disturbance and input computation using the "closed loop" algorithm.
This is an almost verbatim copy of the robot_simple_disturbance.py
code by Petter Nilsson and Nok Wongpiromsarn. It demonstrates use of
the gr1c synthesis tool, rather than the historic default of JTLV.
Toggle the truth value of load_from_XML to indicate whether to
generate a new tulipcon XML file, or read from one.
SCL; 25 April 2013.
"""
from os import environ as os_environ
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tulip import discretize, prop2part, grsim, conxml
from tulip import gr1cint
from tulip.spec import GRSpec
import tulip.polytope as pc
from tulip.polytope.plot import plot_partition
if os_environ.has_key("TULIP_REGRESS"):
regression_mode = True
else:
regression_mode = False
# Problem parameters
input_bound = 0.4
uncertainty = 0.05
N = 5
load_from_XML = False
if not load_from_XML:
# Environment variables
env_vars = {'park' : 'boolean'}
# Discrete system variable
# Introduce a boolean variable X0reach to handle the spec [](park -> <>X0)
# X0reach starts with TRUE.
# [](next(X0reach) = (X0 | X0reach) & !park)
sys_disc_vars = {'X0reach' : 'boolean'}
# Continuous state space
cont_state_space = pc.Polytope(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]),
np.array([[3.],[0.],[2.],[0.]]))
# Continuous proposition
cont_props = {}
for i in xrange(0, 3):
for j in xrange(0, 2):
prop_sym = 'X' + str(3*j + i)
cont_props[prop_sym] = pc.Polytope(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]),
np.array([[float(i+1)],
[float(-i)],
[float(j+1)],
[float(-j)]]))
# Continuous dynamics
A = np.array([[1.1052, 0.],[ 0., 1.1052]])
B = np.array([[1.1052, 0.],[ 0., 1.1052]])
E = np.array([[1,0],[0,1]])
U = pc.Polytope(np.array([[1., 0.],[-1., 0.], [0., 1.], [0., -1.]]),
input_bound*np.array([[1.],[1.],[1.],[1.]]))
W = pc.Polytope(np.array([[1.,0.],[-1.,0.],[0.,1.],[0.,-1.]]),
uncertainty*np.array([1., 1., 1., 1.]))
sys_dyn = discretize.CtsSysDyn(A,B,E,[],U,W)
# Compute the proposition preserving partition of the continuous state space
cont_partition = prop2part.prop2part2(cont_state_space, cont_props)
# Discretize the continuous state space
disc_dynamics = discretize.discretize(cont_partition, sys_dyn, closed_loop=True,
N=N, min_cell_volume=0.1, verbose=0)
# Spec
spec = GRSpec(env_vars=env_vars.keys(),
sys_vars=sys_disc_vars.keys(),
sys_init=["X0reach"],
env_prog=["!park"],
sys_safety=["(X0reach' & ((X0' | X0reach) & !park')) | (!X0reach' & ((!X0' & !X0reach) | park'))"],
sys_prog=['X5', 'X0reach'])
# Import discretization (abstraction) of continuous state space
spec.importDiscDynamics(disc_dynamics)
# Check realizability
realizability = gr1cint.check_realizable(spec, verbose=1)
# Compute an automaton
aut = gr1cint.synthesize(spec, verbose=1)
aut.writeDotFile("rdsimple_gr1c_example.dot", hideZeros=True)
# Remove dead-end states from automaton
#aut.trimDeadStates()
conxml.writeXMLfile("rsimple_example.xml", spec=spec, sys_dyn=sys_dyn,
aut=aut, disc_dynamics=disc_dynamics, pretty=True)
else:
# Read from tulipcon XML file
(disc_dynamics, sys_dyn, aut) = conxml.readXMLfile("rsimple_example.xml")
# Simulate
if regression_mode:
np.random.seed(0)
num_it = 10
init_state = {'X0reach': True}
graph_vis = raw_input("Do you want to open in Gephi? (y/n)") == 'y'
destfile = 'rsdisturbance_example.gexf'
states = grsim.grsim([aut], env_states=[init_state], num_it=num_it,
deterministic_env=regression_mode, graph_vis=graph_vis,
destfile=destfile)
# Dump state sequence.
print "\n".join([str(aut.node[n]["state"]) for (autID, n) in states]) + "\n"
# Store discrete trajectory in np array
cellid_arr = []
for (autID, n) in states:
occupied_cells = [int(k[len("cellID_"):]) for (k,v) in aut.node[n]["state"].items() if v==1 and k.startswith("cellID")]
if len(occupied_cells) > 1:
print "ERROR: more than one cell occupied by continuous state."
exit(-1)
cellid_arr.append(occupied_cells[0])
cellid_arr = np.array(cellid_arr)
# First continuous state is middle point of first cell
r, x = pc.cheby_ball(disc_dynamics.list_region[cellid_arr[0]])
x = x.flatten()
x_arr = x
u_arr = np.zeros([N*num_it, sys_dyn.B.shape[1]])
d_arr = np.zeros([N*num_it, sys_dyn.E.shape[1]])
for i in range(1, len(cellid_arr)):
# For each step, calculate N input signals
for j in range(N):
u_seq = discretize.get_input(x, sys_dyn, disc_dynamics, \
cellid_arr[i-1], cellid_arr[i], N-j, mid_weight=3, Q=np.eye(2*(N-j)), \
test_result=True)
u0 = u_seq[0,:] # Only the first input should be used
u_arr[(i-1)*N + j,:] = u0 # Store input
d = uncertainty * 2 * (np.random.rand(2) - 0.5 ) # Simulate disturbance
d_arr[(i-1)*N + j,:] = d # Store disturbance
x = np.dot(sys_dyn.A, x).flatten() + np.dot(sys_dyn.B, u0).flatten() + np.dot(sys_dyn.E, d).flatten()
x_arr = np.vstack([x_arr, x]) # Store state
# Print trajectory information
for i in range(x_arr.shape[0]-1):
print "From: " + str(cellid_arr[np.floor(i/N)]) + " to " + str(cellid_arr[np.floor(i/N) + 1]) \
+ " u: " + str(u_arr[i,:]) + " x: " + str(x_arr[i,:]) + " d: " + str(d_arr[i,:])
print "Final state x: " + str(x_arr[-1,:])
# Plot state trajectory
if not regression_mode:
ax = plot_partition(disc_dynamics, show=False)
arr_size = 0.05
for i in range(1,x_arr.shape[0]):
x = x_arr[i-1,0]
y = x_arr[i-1,1]
dx = x_arr[i,0] - x
dy = x_arr[i,1] - y
arr = matplotlib.patches.Arrow(float(x),float(y),float(dx),float(dy),width=arr_size)
ax.add_patch(arr)
spec_ind = range(0, x_arr.shape[0], N)
ax.plot(x_arr[spec_ind,0], x_arr[spec_ind,1], 'oy')
ax.plot(x_arr[0,0], x_arr[0,1], 'og')
ax.plot(x_arr[-1,0], x_arr[-1,1], 'or')
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
bzero/statsmodels | statsmodels/examples/ex_feasible_gls_het_0.py | 34 | 6454 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during development.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimates the model for a given weight matrix;
here we also want to estimate the weight matrix by two-step or iterative WLS.
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
"""
from __future__ import print_function
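# --- Editor's sketch (illustrative, not part of the original example) -------
# Two-step feasible GLS in its simplest form: (1) fit OLS, (2) regress the
# squared residuals on Z to estimate the per-observation variance, (3) refit
# with WLS using weights = 1 / fitted variance.  The toy data below are an
# arbitrary assumption used only to show the pattern.
import numpy as _demo_np
import statsmodels.api as _demo_sm
_demo_rs = _demo_np.random.RandomState(0)
_demo_z = _demo_np.linspace(1., 2., 200)
_demo_X = _demo_sm.add_constant(_demo_rs.randn(200))
_demo_y = _demo_X.dot([1., 0.5]) + _demo_np.sqrt(_demo_z) * _demo_rs.randn(200)
_demo_ols = _demo_sm.OLS(_demo_y, _demo_X).fit()
_demo_var = _demo_sm.OLS(_demo_ols.resid**2, _demo_sm.add_constant(_demo_z)).fit()
_demo_w = 1. / _demo_np.maximum(_demo_var.fittedvalues, 1e-2)  # guard against non-positive fitted variances
_demo_fgls = _demo_sm.WLS(_demo_y, _demo_X, weights=_demo_w).fit()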
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
from statsmodels.tools.tools import add_constant
examples = ['ex1']
if 'ex1' in examples:
nsample = 300 #different pattern last graph with 100 or 200 or 500
sig = 0.5
np.random.seed(9876789) #9876543)
X = np.random.randn(nsample, 3)
X = np.column_stack((np.ones((nsample,1)), X))
beta = [1, 0.5, -0.5, 1.]
y_true2 = np.dot(X, beta)
x1 = np.linspace(0, 1, nsample)
gamma = np.array([1, 3.])
#with slope 3 instead of two, I get negative weights, Not correct
# - was misspecified, but the negative weights are still possible with identity link
#gamma /= gamma.sum() #normalize assuming x1.max is 1
z_true = add_constant(x1)
winv = np.dot(z_true, gamma)
het_params = sig**2 * np.array([1, 3.]) # for squared
sig2_het = sig**2 * winv
weights_dgp = 1/winv
weights_dgp /= weights_dgp.max() #should be already normalized - NOT check normalization
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
z0 = np.zeros(nsample)
z0[(nsample * 5)//10:] = 1 #dummy for 2 halves of sample
z0 = add_constant(z0)
z1 = add_constant(x1)
noise = np.sqrt(sig2_het) * np.random.normal(size=nsample)
y2 = y_true2 + noise
X2 = X[:,[0,2]] #misspecified, missing regressor in main equation
X2 = X #correctly specified
res_ols = OLS(y2, X2).fit()
print('OLS beta estimates')
print(res_ols.params)
print('OLS stddev of beta')
print(res_ols.bse)
print('\nWLS')
mod0 = GLSHet2(y2, X2, exog_var=winv)
res0 = mod0.fit()
print('new version')
mod1 = GLSHet(y2, X2, exog_var=winv)
res1 = mod1.iterative_fit(2)
print('WLS beta estimates')
print(res1.params)
print(res0.params)
print('WLS stddev of beta')
print(res1.bse)
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print(res1.model.weights/res1.model.weights.max())
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), weights_dgp, 14)
print('residual regression params')
print(res1.results_residual_regression.params)
print('scale of model ?')
print(res1.scale)
print('unweighted residual variance, note unweighted mean is not zero')
print(res1.resid.var())
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = True #False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#the next only works if w has finite support, discrete/categorical
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
#z = (w0[:,None] == np.unique(w0)).astype(float) #dummy variable
#changed z0 contains dummy and constant
mod2 = GLSHet(y2, X2, exog_var=z0)
res2 = mod2.iterative_fit(3)
print(res2.params)
import statsmodels.api as sm
#z = sm.add_constant(w, prepend=True)
z = sm.add_constant(x1/x1.max())
mod3 = GLSHet(y2, X2, exog_var=z1)#, link=sm.families.links.log())
res3 = mod3.iterative_fit(20)
error_var_3 = res3.mse_resid/res3.model.weights
print(res3.params)
print("np.array(res3.model.history['ols_params'])")
print(np.array(res3.model.history['ols_params']))
print("np.array(res3.model.history['self_params'])")
print(np.array(res3.model.history['self_params']))
#Models 2 and 3 are equivalent with different parameterization of Z
print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
print(np.unique(res3.model.weights))
print(res3.summary())
print('\n\nResults of estimation of weights')
print('--------------------------------')
print(res3.results_residual_regression.summary())
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.figure()
plt.ylim(0, 5)
res_e2 = OLS(noise**2, z).fit()
plt.plot(noise**2, 'bo', alpha=0.5, label='dgp error**2')
plt.plot(res_e2.fittedvalues, lw=2, label='ols for noise**2')
#plt.plot(res3.model.weights, label='GLSHet weights')
plt.plot(error_var_3, lw=2, label='GLSHet error var')
plt.plot(res3.resid**2, 'ro', alpha=0.5, label='resid squared')
#plt.plot(weights_dgp, label='DGP weights')
plt.plot(sig**2 * winv, lw=2, label='DGP error var')
plt.legend()
plt.show()
'''Note these are close but maybe biased because of skewed distribution
>>> res3.mse_resid/res3.model.weights[-10:]
array([ 1.03115871, 1.03268209, 1.03420547, 1.03572885, 1.03725223,
1.03877561, 1.04029899, 1.04182237, 1.04334575, 1.04486913])
>>> res_e2.fittedvalues[-10:]
array([ 1.0401953 , 1.04171386, 1.04323242, 1.04475098, 1.04626954,
1.0477881 , 1.04930666, 1.05082521, 1.05234377, 1.05386233])
>>> sig**2 * w[-10:]
array([ 0.98647295, 0.98797595, 0.98947896, 0.99098196, 0.99248497,
0.99398798, 0.99549098, 0.99699399, 0.99849699, 1. ])
'''
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/plot_isotonic_regression.py | 55 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
jblackburne/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 58 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/mpl_toolkits/gtktools.py | 7 | 19393 | """
Some gtk specific tools and widgets
* rec2gtk : put record array in GTK treeview - requires gtk
Example usage
import matplotlib.mlab as mlab
import mpl_toolkits.gtktools as gtktools
r = mlab.csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = mlab.FormatFloat(2),
change = mlab.FormatPercent(2),
cost = mlab.FormatThousands(2),
)
exceltools.rec2excel(r, 'test.xls', formatd=formatd)
mlab.rec2csv(r, 'test.csv', formatd=formatd)
import gtk
scroll = gtktools.rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import copy
import gtk, gobject
import numpy as npy
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
def error_message(msg, parent=None, title=None):
"""
create an error message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
else:
dialog.set_title('Error!')
dialog.show()
dialog.run()
dialog.destroy()
return None
def simple_message(msg, parent=None, title=None):
"""
create a simple message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
dialog.show()
dialog.run()
dialog.destroy()
return None
def gtkformat_factory(format, colnum):
"""
copy the format, perform any overrides, and attach an gtk style attrs
xalign = 0.
cell = None
"""
if format is None: return None
format = copy.copy(format)
format.xalign = 0.
format.cell = None
def negative_red_cell(column, cell, model, thisiter):
val = model.get_value(thisiter, colnum)
try: val = float(val)
except: cell.set_property('foreground', 'black')
else:
if val<0:
cell.set_property('foreground', 'red')
else:
cell.set_property('foreground', 'black')
if isinstance(format, mlab.FormatFloat) or isinstance(format, mlab.FormatInt):
format.cell = negative_red_cell
format.xalign = 1.
elif isinstance(format, mlab.FormatDate):
format.xalign = 1.
return format
class SortedStringsScrolledWindow(gtk.ScrolledWindow):
"""
A simple treeview/liststore assuming all columns are strings.
Supports ascending/descending sort by clicking on column header
"""
def __init__(self, colheaders, formatterd=None):
"""
xalignd if not None, is a dict mapping col header to xalignment (default 1)
formatterd if not None, is a dict mapping col header to a ColumnFormatter
"""
gtk.ScrolledWindow.__init__(self)
self.colheaders = colheaders
self.seq = None # not initialized with accts
self.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_AUTOMATIC)
types = [gobject.TYPE_STRING] * len(colheaders)
model = self.model = gtk.ListStore(*types)
treeview = gtk.TreeView(self.model)
treeview.show()
treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
treeview.set_rules_hint(True)
class Clicked:
def __init__(self, parent, i):
self.parent = parent
self.i = i
self.num = 0
def __call__(self, column):
ind = []
dsu = []
for rownum, thisiter in enumerate(self.parent.iters):
val = model.get_value(thisiter, self.i)
try: val = float(val.strip().rstrip('%'))
except ValueError: pass
if mlab.safe_isnan(val): val = npy.inf # force nan to sort uniquely
dsu.append((val, rownum))
dsu.sort()
if not self.num%2: dsu.reverse()
vals, otherind = list(zip(*dsu))
ind.extend(otherind)
self.parent.model.reorder(ind)
newiters = []
for i in ind:
newiters.append(self.parent.iters[i])
self.parent.iters = newiters[:]
for i, thisiter in enumerate(self.parent.iters):
key = tuple([self.parent.model.get_value(thisiter, j) for j in range(len(colheaders))])
self.parent.rownumd[i] = key
self.num+=1
if formatterd is None:
formatterd = dict()
formatterd = formatterd.copy()
for i, header in enumerate(colheaders):
renderer = gtk.CellRendererText()
if header not in formatterd:
formatterd[header] = ColumnFormatter()
formatter = formatterd[header]
column = gtk.TreeViewColumn(header, renderer, text=i)
renderer.set_property('xalign', formatter.xalign)
renderer.set_property('editable', True)
renderer.connect("edited", self.position_edited, i)
column.connect('clicked', Clicked(self, i))
column.set_property('clickable', True)
if formatter.cell is not None:
column.set_cell_data_func(renderer, formatter.cell)
treeview.append_column(column)
self.formatterd = formatterd
self.lastcol = column
self.add(treeview)
self.treeview = treeview
self.clear()
def position_edited(self, renderer, path, newtext, position):
#print path, position
self.model[path][position] = newtext
def clear(self):
self.iterd = dict()
self.iters = [] # an ordered list of iters
self.rownumd = dict() # a map from rownum -> symbol
self.model.clear()
self.datad = dict()
def flat(self, row):
seq = []
for i,val in enumerate(row):
formatter = self.formatterd.get(self.colheaders[i])
seq.extend([i,formatter.tostr(val)])
return seq
def __delete_selected(self, *unused): # untested
keyd = dict([(thisiter, key) for key, thisiter in self.iterd.items()])
for row in self.get_selected():
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.iters.remove(thisiter)
for i, thisiter in enumerate(self.iters):
self.rownumd[i] = keyd[thisiter]
def delete_row(self, row):
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.rownumd[len(self.iters)] = key
self.iters.remove(thisiter)
for rownum, thiskey in list(six.iteritems(self.rownumd)):
if thiskey==key: del self.rownumd[rownum]
def add_row(self, row):
thisiter = self.model.append()
self.model.set(thisiter, *self.flat(row))
key = tuple(row)
self.datad[key] = row
self.iterd[key] = thisiter
self.rownumd[len(self.iters)] = key
self.iters.append(thisiter)
def update_row(self, rownum, newrow):
key = self.rownumd[rownum]
thisiter = self.iterd[key]
newkey = tuple(newrow)
self.rownumd[rownum] = newkey
del self.datad[key]
del self.iterd[key]
self.datad[newkey] = newrow
self.iterd[newkey] = thisiter
self.model.set(thisiter, *self.flat(newrow))
def get_row(self, rownum):
key = self.rownumd[rownum]
return self.datad[key]
def get_selected(self):
selected = []
def foreach(model, path, iter, selected):
selected.append(model.get_value(iter, 0))
self.treeview.get_selection().selected_foreach(foreach, selected)
return selected
def rec2gtk(r, formatd=None, rownum=0, autowin=True):
"""
formatd is a dictionary mapping dtype name -> mlab.Format instances
This function creates a SortedStringsScrolledWindow (derived
from gtk.ScrolledWindow) and returns it. if autowin is True,
a gtk.Window is created, attached to the
SortedStringsScrolledWindow instance, shown and returned. If
autowin=False, the caller is responsible for adding the
SortedStringsScrolledWindow instance to a gtk widget and
showing it.
"""
if formatd is None:
formatd = dict()
formats = []
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = mlab.defaultformatd.get(dt.type, mlab.FormatObj())
#print 'gtk fmt factory', i, name, format, type(format)
format = gtkformat_factory(format, i)
formatd[name] = format
colheaders = r.dtype.names
scroll = SortedStringsScrolledWindow(colheaders, formatd)
ind = npy.arange(len(r.dtype.names))
for row in r:
scroll.add_row(row)
if autowin:
win = gtk.Window()
win.set_default_size(800,600)
#win.set_geometry_hints(scroll)
win.add(scroll)
win.show_all()
scroll.win = win
return scroll
class RecListStore(gtk.ListStore):
"""
A liststore as a model of an editable record array.
attributes:
* r - the record array with the edited values
* formatd - the list of mlab.FormatObj instances, with gtk attachments
* stringd - a dict mapping dtype names to a list of valid strings for the combo drop downs
* callbacks - a matplotlib.cbook.CallbackRegistry. Connect to the cell_changed with
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
cid = liststore.callbacks.connect('cell_changed', mycallback)
"""
def __init__(self, r, formatd=None, stringd=None):
"""
r is a numpy record array
formatd is a dict mapping dtype name to mlab.FormatObj instances
stringd, if not None, is a dict mapping dtype names to a list of
valid strings for a combo drop down editor
"""
if stringd is None:
stringd = dict()
if formatd is None:
formatd = mlab.get_formatd(r)
self.stringd = stringd
self.callbacks = cbook.CallbackRegistry(['cell_changed'])
self.r = r
self.headers = r.dtype.names
self.formats = [gtkformat_factory(formatd.get(name, mlab.FormatObj()),i)
for i,name in enumerate(self.headers)]
# use the gtk attached versions
self.formatd = formatd = dict(zip(self.headers, self.formats))
types = []
for format in self.formats:
if isinstance(format, mlab.FormatBool):
types.append(gobject.TYPE_BOOLEAN)
else:
types.append(gobject.TYPE_STRING)
self.combod = dict()
if len(stringd):
types.extend([gobject.TYPE_INT]*len(stringd))
keys = list(six.iterkeys(stringd))
keys.sort()
valid = set(r.dtype.names)
for ikey, key in enumerate(keys):
assert(key in valid)
combostore = gtk.ListStore(gobject.TYPE_STRING)
for s in stringd[key]:
combostore.append([s])
self.combod[key] = combostore, len(self.headers)+ikey
gtk.ListStore.__init__(self, *types)
for row in r:
vals = []
for formatter, val in zip(self.formats, row):
if isinstance(formatter, mlab.FormatBool):
vals.append(val)
else:
vals.append(formatter.tostr(val))
if len(stringd):
# todo, get correct index here?
vals.extend([0]*len(stringd))
self.append(vals)
def position_edited(self, renderer, path, newtext, position):
position = int(position)
format = self.formats[position]
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
try: newval = format.fromstr(newtext)
except ValueError:
msg = cbook.exception_to_str('Error converting "%s"'%newtext)
error_message(msg, title='Error')
return
self.r[rownum][colname] = newval
self[path][position] = format.tostr(newval)
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
def position_toggled(self, cellrenderer, path, position):
position = int(position)
format = self.formats[position]
newval = not cellrenderer.get_active()
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
self.r[rownum][colname] = newval
self[path][position] = newval
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
class RecTreeView(gtk.TreeView):
"""
An editable tree view widget for record arrays
"""
def __init__(self, recliststore, constant=None):
"""
build a gtk.TreeView to edit a RecListStore
constant, if not None, is a list of dtype names which are not editable
"""
self.recliststore = recliststore
gtk.TreeView.__init__(self, recliststore)
combostrings = set(recliststore.stringd.keys())
if constant is None:
constant = []
constant = set(constant)
for i, header in enumerate(recliststore.headers):
formatter = recliststore.formatd[header]
coltype = recliststore.get_column_type(i)
if coltype==gobject.TYPE_BOOLEAN:
renderer = gtk.CellRendererToggle()
if header not in constant:
renderer.connect("toggled", recliststore.position_toggled, i)
renderer.set_property('activatable', True)
elif header in combostrings:
renderer = gtk.CellRendererCombo()
renderer.connect("edited", recliststore.position_edited, i)
combostore, listind = recliststore.combod[header]
renderer.set_property("model", combostore)
renderer.set_property('editable', True)
else:
renderer = gtk.CellRendererText()
if header not in constant:
renderer.connect("edited", recliststore.position_edited, i)
renderer.set_property('editable', True)
if formatter is not None:
renderer.set_property('xalign', formatter.xalign)
tvcol = gtk.TreeViewColumn(header)
self.append_column(tvcol)
tvcol.pack_start(renderer, True)
if coltype == gobject.TYPE_STRING:
tvcol.add_attribute(renderer, 'text', i)
if header in combostrings:
combostore, listind = recliststore.combod[header]
tvcol.add_attribute(renderer, 'text-column', listind)
elif coltype == gobject.TYPE_BOOLEAN:
tvcol.add_attribute(renderer, 'active', i)
if formatter is not None and formatter.cell is not None:
tvcol.set_cell_data_func(renderer, formatter.cell)
self.connect("button-release-event", self.on_selection_changed)
#self.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_BOTH)
self.get_selection().set_mode(gtk.SELECTION_BROWSE)
self.get_selection().set_select_function(self.on_select)
def on_select(self, *args):
return False
def on_selection_changed(self, *args):
(path, col) = self.get_cursor()
ren = col.get_cell_renderers()[0]
if isinstance(ren, gtk.CellRendererText):
self.set_cursor_on_cell(path, col, ren, start_editing=True)
def edit_recarray(r, formatd=None, stringd=None, constant=None, autowin=True):
"""
create a RecListStore and RecTreeView and return them.
If autowin is True, create a gtk.Window, insert the treeview into
    it, and return it (the return value will be (liststore, treeview, win)).
See RecListStore and RecTreeView for a description of the keyword args
"""
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
if autowin:
win = gtk.Window()
win.add(treeview)
win.show_all()
return liststore, treeview, win
else:
return liststore, treeview
if __name__=='__main__':
import datetime
import gtk
import numpy as np
import matplotlib.mlab as mlab
N = 10
today = datetime.date.today()
dates = [today+datetime.timedelta(days=i) for i in range(N)] # datetimes
weekdays = [d.strftime('%a') for d in dates] # strings
gains = np.random.randn(N) # floats
prices = np.random.rand(N)*1e7 # big numbers
up = gains>0 # bools
    clientid = list(range(N))  # ints (range works on both Python 2 and 3)
r = np.rec.fromarrays([clientid, dates, weekdays, gains, prices, up],
names='clientid,date,weekdays,gains,prices,up')
# some custom formatters
formatd = mlab.get_formatd(r)
formatd['date'] = mlab.FormatDate('%Y-%m-%d')
formatd['prices'] = mlab.FormatMillions(precision=1)
formatd['gain'] = mlab.FormatPercent(precision=2)
# use a drop down combo for weekdays
stringd = dict(weekdays=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
constant = ['clientid'] # block editing of this field
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
liststore.callbacks.connect('cell_changed', mycallback)
win = gtk.Window()
win.set_title('with full customization')
win.add(treeview)
win.show_all()
# or you just use the defaults
r2 = r.copy()
ls, tv, win2 = edit_recarray(r2)
win2.set_title('with all defaults')
gtk.main()
| mit |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/tests/test_offsetbox.py | 12 | 2817 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import nose
from nose.tools import assert_true, assert_false
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
@image_comparison(baseline_images=['offsetbox_clipping'], remove_text=True)
def test_offsetbox_clipping():
# - create a plot
# - put an AnchoredOffsetbox with a child DrawingArea
# at the center of the axes
# - give the DrawingArea a gray background
# - put a black line across the bounds of the DrawingArea
# - see that the black line is clipped to the edges of
# the DrawingArea.
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
@cleanup
def test_offsetbox_clip_children():
# - create a plot
# - put an AnchoredOffsetbox with a child DrawingArea
# at the center of the axes
# - give the DrawingArea a gray background
# - put a black line across the bounds of the DrawingArea
# - see that the black line is clipped to the edges of
# the DrawingArea.
fig, ax = plt.subplots()
size = 100
da = DrawingArea(size, size, clip=True)
bg = mpatches.Rectangle((0, 0), size, size,
facecolor='#CCCCCC',
edgecolor='None',
linewidth=0)
line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],
color='black',
linewidth=10)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=(.5, .5),
bbox_transform=ax.transAxes,
borderpad=0.)
da.add_artist(bg)
da.add_artist(line)
ax.add_artist(anchored_box)
fig.canvas.draw()
assert_false(fig.stale)
da.clip_children = True
assert_true(fig.stale)
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
jrbourbeau/cr-composition | feature_selection/submit_feature_scan.py | 1 | 4411 | #!/usr/bin/env python
from __future__ import division, print_function
import os
import numpy as np
import argparse
import warnings
from itertools import product, combinations
import pycondor
import comptools as comp
warnings.filterwarnings("ignore", category=DeprecationWarning, module="sklearn")
base_features = ['lap_cos_zenith', 'log_s125', 'log_dEdX']
# Want to check how IceTop alone does
icetop_only_features = ['lap_cos_zenith', 'log_s125', 'IceTopLLHRatio']
scan_features = [base_features,
base_features + ['avg_inice_radius'],
base_features + ['IceTopLLHRatio'],
icetop_only_features,
]
# Sanity check to make sure only two of the base_features perform worse
scan_features += [list(i) for i in combinations(base_features, 2)]
dom_numbers = [1, 15, 30, 45, 60]
scan_features += [base_features + ['NChannels_1_60'] + \
['NChannels_{}_{}'.format(min_DOM, max_DOM)
for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:])] + \
['FractionContainment_Laputop_InIce']
]
scan_features += [base_features + ['NHits_1_60'] + \
['NHits_{}_{}'.format(min_DOM, max_DOM)
for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:])] + \
['FractionContainment_Laputop_InIce']
]
min_dists = np.arange(0, 1125, 125)
scan_features += [base_features + ['IceTop_charge_beyond_{}m'.format(min_dist) for min_dist in min_dists] + ['FractionContainment_Laputop_IceTop']]
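# Rough sketch of the feature sets assembled above, for orientation (column
# names are the comptools dataframe keys used in this repository):
#
#     [lap_cos_zenith, log_s125, log_dEdX]                         # base set
#     [lap_cos_zenith, log_s125, log_dEdX, avg_inice_radius]
#     [lap_cos_zenith, log_s125, log_dEdX, IceTopLLHRatio]
#     [lap_cos_zenith, log_s125, IceTopLLHRatio]                   # IceTop-only
#     ... every two-feature subset of the base set ...
#     ... base set + binned NChannels/NHits + in-ice containment ...
#     ... base set + IceTop charge beyond r + IceTop containment ...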
if __name__ == '__main__':
    description = 'Submits cluster jobs that scan feature sets for composition classification'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--config', dest='config',
choices=comp.simfunctions.get_sim_configs(),
help='Detector configuration')
parser.add_argument('--num_groups', dest='num_groups', type=int,
default=4, choices=[2, 3, 4],
help='Number of composition groups')
parser.add_argument('--pipeline', dest='pipeline',
default='xgboost',
help='Composition classification pipeline to use')
parser.add_argument('--n_jobs', dest='n_jobs', type=int,
default=1, choices=list(range(1, 21)),
help='Number of jobs to run in parallel for the '
'gridsearch. Ignored if gridsearch=False.')
args = parser.parse_args()
config = args.config
num_groups = args.num_groups
pipeline = args.pipeline
n_jobs = args.n_jobs
executable = os.path.abspath('feature_scan.py')
# Define pycondor Job/Dagman directories
error = os.path.join(comp.paths.condor_data_dir, 'error')
output = os.path.join(comp.paths.condor_data_dir, 'output')
log = os.path.join(comp.paths.condor_scratch_dir, 'log')
submit = os.path.join(comp.paths.condor_scratch_dir, 'submit')
outdir = os.path.join(os.path.dirname(__file__),
'feature_scan_results')
dag_name = 'feature_scan_{}_num_groups-{}'.format(pipeline, num_groups)
dag = pycondor.Dagman(name=dag_name,
submit=submit)
for idx, (features, random_feature) in enumerate(product(scan_features, [True, False])):
feature_str = '-'.join(features)
job = pycondor.Job(name='feature_scan_num_groups-{}_{}'.format(num_groups, idx),
executable=executable,
submit=submit,
error=error,
output=output,
log=log,
request_cpus=n_jobs,
request_memory='3GB',
verbose=1,
dag=dag)
argument = '--features {} '.format(' '.join(features))
for arg_name in ['config', 'num_groups', 'pipeline', 'n_jobs']:
argument += '--{} {} '.format(arg_name, getattr(args, arg_name))
if random_feature:
argument += '--random_feature '
outfile = os.path.join(outdir, '{}_{}-groups-{}.pkl'.format(pipeline, num_groups, idx))
argument += '--outfile {} '.format(outfile)
job.add_arg(argument)
dag.build_submit()
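# Example invocation (values are hypothetical; valid detector configurations
# come from comp.simfunctions.get_sim_configs()):
#
#     python submit_feature_scan.py -c IC86.2012 --num_groups 4 \
#         --pipeline xgboost --n_jobs 4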
| mit |
khalibartan/pgmpy | pgmpy/tests/test_models/test_BayesianModel.py | 1 | 32062 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianModel, MarkovModel
import pgmpy.tests.help_functions as hf
from pgmpy.factors.discrete import TabularCPD, JointProbabilityDistribution, DiscreteFactor
from pgmpy.independencies import Independencies
from pgmpy.estimators import BayesianEstimator, BaseEstimator, MaximumLikelihoodEstimator
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianModel()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.g.edges()),
[['a', 'b'], ['b', 'c']])
def test_class_init_with_data_nonstring(self):
BayesianModel([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node('a')
self.assertListEqual(list(self.G.nodes()), ['a'])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(['a', 'b', 'c', 'd'])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge('d', 'e')
self.assertListEqual(sorted(self.G.nodes()), ['d', 'e'])
self.assertListEqual(list(self.G.edges()), [('d', 'e')])
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edge('a', 'b')
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['d', 'e']])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, 'a', 'a')
def test_add_edge_result_cycle(self):
self.G.add_edges_from([('a', 'b'), ('a', 'c')])
self.assertRaises(ValueError, self.G.add_edge, 'c', 'a')
def test_add_edges_from_string(self):
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['b', 'c']])
self.G.add_nodes_from(['d', 'e', 'f'])
self.G.add_edges_from([('d', 'e'), ('e', 'f')])
self.assertListEqual(sorted(self.G.nodes()),
['a', 'b', 'c', 'd', 'e', 'f'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([('a', 'b'), ('b', 'c'),
('d', 'e'), ('e', 'f')]))
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'a')])
def test_add_edges_from_result_cycle(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'b'), ('b', 'c'), ('c', 'a')])
def test_update_node_parents_bm_constructor(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(list(self.g.predecessors('a')), [])
self.assertListEqual(list(self.g.predecessors('b')), ['a'])
self.assertListEqual(list(self.g.predecessors('c')), ['b'])
def test_update_node_parents(self):
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(list(self.G.predecessors('a')), [])
self.assertListEqual(list(self.G.predecessors('b')), ['a'])
self.assertListEqual(list(self.G.predecessors('c')), ['b'])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('a', 'd'), ('b', 'd'),
('d', 'e'), ('b', 'c')])
self.G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, values=[[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, values=[[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3, values=[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'], evidence_card=[2, 3])
self.G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
self.G2 = BayesianModel([('d', 'g'), ('g', 'l'), ('i', 'g'), ('i', 'l')])
def test_moral_graph(self):
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')] or
(edge[1], edge[0]) in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')])
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianModel([('a', 'd'), ('d', 'e'), ('b', 'd'), ('b', 'c'), ('a', 'b')])
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')] or
(edge[1], edge[0]) in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')])
def test_get_ancestors_of_success(self):
        ancestors1 = self.G2._get_ancestors_of('g')
        ancestors2 = self.G2._get_ancestors_of('d')
        ancestors3 = self.G2._get_ancestors_of(['i', 'l'])
        self.assertEqual(ancestors1, {'d', 'i', 'g'})
        self.assertEqual(ancestors2, {'d'})
        self.assertEqual(ancestors3, {'g', 'i', 'l', 'd'})
def test_get_ancestors_of_failure(self):
self.assertRaises(ValueError, self.G2._get_ancestors_of, 'h')
def test_get_cardinality(self):
self.assertDictEqual(self.G1.get_cardinality(), {'diff': 2, 'intel': 3, 'grade': 3})
def test_get_cardinality_with_node(self):
self.assertEqual(self.G1.get_cardinality('diff'), 2)
self.assertEqual(self.G1.get_cardinality('intel'), 3)
self.assertEqual(self.G1.get_cardinality('grade'), 3)
def test_local_independencies(self):
self.assertEqual(self.G.local_independencies('a'), Independencies(['a', ['b', 'c']]))
self.assertEqual(self.G.local_independencies('c'), Independencies(['c', ['a', 'd', 'e'], 'b']))
self.assertEqual(self.G.local_independencies('d'), Independencies(['d', 'c', ['b', 'a']]))
self.assertEqual(self.G.local_independencies('e'), Independencies(['e', ['c', 'b', 'a'], 'd']))
self.assertEqual(self.G.local_independencies('b'), Independencies(['b', 'a']))
self.assertEqual(self.G1.local_independencies('grade'), Independencies())
def test_get_independencies(self):
chain = BayesianModel([('X', 'Y'), ('Y', 'Z')])
self.assertEqual(chain.get_independencies(), Independencies(('X', 'Z', 'Y'), ('Z', 'X', 'Y')))
fork = BayesianModel([('Y', 'X'), ('Y', 'Z')])
self.assertEqual(fork.get_independencies(), Independencies(('X', 'Z', 'Y'), ('Z', 'X', 'Y')))
collider = BayesianModel([('X', 'Y'), ('Z', 'Y')])
self.assertEqual(collider.get_independencies(), Independencies(('X', 'Z'), ('Z', 'X')))
def test_is_imap(self):
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
fac = DiscreteFactor(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(self.G1.is_imap(JPD))
self.assertRaises(TypeError, self.G1.is_imap, fac)
    def test_markov_blanket(self):
G = BayesianModel([('x', 'y'), ('z', 'y'), ('y', 'w'), ('y', 'v'), ('u', 'w'),
('s', 'v'), ('w', 't'), ('w', 'm'), ('v', 'n'), ('v', 'q')])
self.assertEqual(set(G.get_markov_blanket('y')), set(['s', 'w', 'x', 'u', 'z', 'v']))
def test_get_immoralities(self):
G = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y')])
self.assertEqual(G.get_immoralities(), {('w', 'x'), ('w', 'z')})
G1 = BayesianModel([('x', 'y'), ('z', 'y'), ('z', 'x'), ('w', 'y')])
self.assertEqual(G1.get_immoralities(), {('w', 'x'), ('w', 'z')})
G2 = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y'), ('w', 'x')])
self.assertEqual(G2.get_immoralities(), {('w', 'z')})
def test_is_iequivalent(self):
G = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y')])
self.assertRaises(TypeError, G.is_iequivalent, MarkovModel())
G1 = BayesianModel([('V', 'W'), ('W', 'X'), ('X', 'Y'), ('Z', 'Y')])
G2 = BayesianModel([('W', 'V'), ('X', 'W'), ('X', 'Y'), ('Z', 'Y')])
self.assertTrue(G1.is_iequivalent(G2))
G3 = BayesianModel([('W', 'V'), ('W', 'X'), ('Y', 'X'), ('Z', 'Y')])
self.assertFalse(G3.is_iequivalent(G2))
def test_copy(self):
model_copy = self.G1.copy()
self.assertEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
self.assertNotEqual(id(self.G1.get_cpds('diff')),
id(model_copy.get_cpds('diff')))
self.G1.remove_cpds('diff')
diff_cpd = TabularCPD('diff', 2, values=[[0.3], [0.7]])
self.G1.add_cpds(diff_cpd)
self.assertNotEqual(self.G1.get_cpds('diff'),
model_copy.get_cpds('diff'))
self.G1.remove_node('intel')
self.assertNotEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertNotEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
def test_remove_node(self):
self.G1.remove_node('diff')
self.assertEqual(sorted(self.G1.nodes()), sorted(['grade', 'intel']))
self.assertRaises(ValueError, self.G1.get_cpds, 'diff')
def test_remove_nodes_from(self):
self.G1.remove_nodes_from(['diff', 'grade'])
self.assertEqual(sorted(self.G1.nodes()), sorted(['intel']))
self.assertRaises(ValueError, self.G1.get_cpds, 'diff')
self.assertRaises(ValueError, self.G1.get_cpds, 'grade')
def tearDown(self):
del self.G
del self.G1
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('d', 'g'), ('i', 'g'), ('g', 'l'),
('i', 's')])
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d')['d']), ['d', 'g', 'l'])
self.assertEqual(sorted(self.G.active_trail_nodes('i')['i']), ['g', 'i', 'l', 's'])
self.assertEqual(sorted(self.G.active_trail_nodes(['d', 'i'])['d']), ['d', 'g', 'l'])
def test_active_trail_nodes_args(self):
self.assertEqual(sorted(self.G.active_trail_nodes(['d', 'l'], observed='g')['d']), ['d', 'i', 's'])
self.assertEqual(sorted(self.G.active_trail_nodes(['d', 'l'], observed='g')['l']), ['l'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['i', 'l'])['s']), ['s'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['d', 'l'])['s']), ['g', 'i', 's'])
def test_is_active_trail_triplets(self):
self.assertTrue(self.G.is_active_trail('d', 'l'))
self.assertTrue(self.G.is_active_trail('g', 's'))
self.assertFalse(self.G.is_active_trail('d', 'i'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='g'))
self.assertFalse(self.G.is_active_trail('d', 'l', observed='g'))
self.assertFalse(self.G.is_active_trail('i', 'l', observed='g'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='l'))
self.assertFalse(self.G.is_active_trail('g', 's', observed='i'))
def test_is_active_trail(self):
self.assertFalse(self.G.is_active_trail('d', 's'))
self.assertTrue(self.G.is_active_trail('s', 'l'))
self.assertTrue(self.G.is_active_trail('d', 's', observed='g'))
self.assertFalse(self.G.is_active_trail('s', 'l', observed='g'))
def test_is_active_trail_args(self):
self.assertFalse(self.G.is_active_trail('s', 'l', 'i'))
self.assertFalse(self.G.is_active_trail('s', 'l', 'g'))
self.assertTrue(self.G.is_active_trail('d', 's', 'l'))
self.assertFalse(self.G.is_active_trail('d', 's', ['i', 'l']))
def test_get_cpds(self):
cpd_d = TabularCPD('d', 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, values=np.random.rand(2, 4),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_l = TabularCPD('l', 2, values=np.random.rand(2, 2),
evidence=['g'], evidence_card=[2])
cpd_s = TabularCPD('s', 2, values=np.random.rand(2, 2),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d').variable, 'd')
def test_get_cpds1(self):
self.model = BayesianModel([('A', 'AB')])
cpd_a = TabularCPD('A', 2, values=np.random.rand(2, 1))
cpd_ab = TabularCPD('AB', 2, values=np.random.rand(2, 2),
evidence=['A'], evidence_card=[2])
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds('A').variable, 'A')
self.assertEqual(self.model.get_cpds('AB').variable, 'AB')
self.assertRaises(ValueError, self.model.get_cpds, 'B')
self.model.add_node('B')
self.assertIsNone(self.model.get_cpds('B'))
def test_add_single_cpd(self):
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], [2])
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
cpd_d = TabularCPD('d', 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, values=np.random.rand(2, 4),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_l = TabularCPD('l', 2, values=np.random.rand(2, 2),
evidence=['g'], evidence_card=[2])
cpd_s = TabularCPD('s', 2, values=np.random.rand(2, 2),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d'), cpd_d)
self.assertEqual(self.G.get_cpds('i'), cpd_i)
self.assertEqual(self.G.get_cpds('g'), cpd_g)
self.assertEqual(self.G.get_cpds('l'), cpd_l)
self.assertEqual(self.G.get_cpds('s'), cpd_s)
def test_check_model(self):
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_s = TabularCPD('s', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['g'], evidence_card=[2])
self.G.add_cpds(cpd_g, cpd_s, cpd_l)
self.assertRaises(ValueError, self.G.check_model)
cpd_d = TabularCPD('d', 2, values=[[0.8, 0.2]])
cpd_i = TabularCPD('i', 2, values=[[0.7, 0.3]])
self.G.add_cpds(cpd_d, cpd_i)
self.assertTrue(self.G.check_model())
def test_check_model1(self):
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 's'], evidence_card=[2, 2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['l'], evidence_card=[2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['d'], evidence_card=[2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3, 0.4, 0.6, 0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4, 0.8, 0.7, 0.6, 0.4]]),
evidence=['g', 'd', 'i'], evidence_card=[2, 2, 2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def test_check_model2(self):
cpd_s = TabularCPD('s', 2, values=np.array([[0.5, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_s)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_s)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.3, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.1, 0.7]]),
evidence=['g'], evidence_card=[2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianModel()
self.model_disconnected.add_nodes_from(['A', 'B', 'C', 'D', 'E'])
self.model_connected = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
self.model2 = BayesianModel([('A', 'C'), ('B', 'C')])
self.data1 = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
self.data2 = pd.DataFrame(data={'A': [0, np.NaN, 1],
'B': [0, 1, 0],
'C': [1, 1, np.NaN],
'D': [np.NaN, 'Y', np.NaN]})
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv('pgmpy/tests/test_estimators/testdata/titanic_train.csv', dtype=str)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_bayesian_fit(self):
print(isinstance(BayesianEstimator, BaseEstimator))
print(isinstance(MaximumLikelihoodEstimator, BaseEstimator))
self.model2.fit(self.data1, estimator=BayesianEstimator, prior_type="dirichlet",
pseudo_counts={'A': [[9], [3]],
'B': [[9], [3]],
'C': [[9, 9, 9, 9],
[3, 3, 3, 3]]})
self.assertEqual(self.model2.get_cpds('B'), TabularCPD('B', 2, [[11.0 / 15], [4.0 / 15]]))
def test_fit_missing_data(self):
self.model2.fit(self.data2, state_names={'C': [0, 1]}, complete_samples_only=False)
cpds = set([TabularCPD('A', 2, [[0.5], [0.5]]),
TabularCPD('B', 2, [[2. / 3], [1. / 3]]),
TabularCPD('C', 2, [[0, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 0.5]],
evidence=['A', 'B'], evidence_card=[2, 2])])
self.assertSetEqual(cpds, set(self.model2.get_cpds()))
def test_disconnected_fit(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
self.model_disconnected.fit(values)
for node in ['A', 'B', 'C', 'D', 'E']:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
value = (values.ix[:, node].value_counts() /
values.ix[:, node].value_counts().sum())
value = value.reindex(sorted(value.index)).values
np_test.assert_array_equal(cpd.values, value)
def test_predict(self):
titanic = BayesianModel()
titanic.add_edges_from([("Sex", "Survived"), ("Pclass", "Survived")])
titanic.fit(self.titanic_data2[500:])
p1 = titanic.predict(self.titanic_data2[["Sex", "Pclass"]][:30])
p2 = titanic.predict(self.titanic_data2[["Survived", "Pclass"]][:30])
p3 = titanic.predict(self.titanic_data2[["Survived", "Sex"]][:30])
p1_res = np.array(['0', '1', '0', '1', '0', '0', '0', '0', '0', '1', '0', '1', '0',
'0', '0', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
'0', '0', '0', '0'])
p2_res = np.array(['male', 'female', 'female', 'female', 'male', 'male', 'male',
'male', 'female', 'female', 'female', 'female', 'male', 'male',
'male', 'female', 'male', 'female', 'male', 'female', 'male',
'female', 'female', 'female', 'male', 'female', 'male', 'male',
'female', 'male'])
p3_res = np.array(['3', '1', '1', '1', '3', '3', '3', '3', '1', '1', '1', '1', '3',
'3', '3', '1', '3', '1', '3', '1', '3', '1', '1', '1', '3', '1',
'3', '3', '1', '3'])
np_test.assert_array_equal(p1.values.ravel(), p1_res)
np_test.assert_array_equal(p2.values.ravel(), p2_res)
np_test.assert_array_equal(p3.values.ravel(), p3_res)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(np.array(np.random.randint(low=0, high=2, size=(1000, 5)),
dtype=str),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop('E', axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(e_predict.values.ravel(),
np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0,
0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,
1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,
1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 1, 0], dtype=str))
def test_connected_predict_probability(self):
np.random.seed(42)
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(100, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:80]
predict_data = values[80:].copy()
self.model_connected.fit(fit_data)
predict_data.drop('E', axis=1, inplace=True)
e_prob = self.model_connected.predict_probability(predict_data)
np_test.assert_allclose(e_prob.values.ravel(),
np.array([0.57894737, 0.42105263, 0.57894737, 0.42105263, 0.57894737,
0.42105263, 0.5, 0.5, 0.57894737, 0.42105263,
0.5, 0.5, 0.57894737, 0.42105263, 0.57894737,
0.42105263, 0.57894737, 0.42105263, 0.5, 0.5,
0.57894737, 0.42105263, 0.57894737, 0.42105263, 0.5,
0.5, 0.57894737, 0.42105263, 0.57894737, 0.42105263,
0.5, 0.5, 0.57894737, 0.42105263, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5]), atol=0)
predict_data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1, 5)),
columns=['A', 'B', 'C', 'F', 'E'])[:]
def test_predict_probability_errors(self):
np.random.seed(42)
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(2, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:1]
predict_data = values[1:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict_probability, predict_data)
predict_data = pd.DataFrame(np.random.randint(low=0, high=2, size=(1, 5)),
columns=['A', 'B', 'C', 'F', 'E'])[:]
self.assertRaises(ValueError, self.model_connected.predict_probability, predict_data)
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDirectedGraphCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianModel()
def test_add_single_cpd(self):
cpd = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff')
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff', 'grade')
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_values_for_node(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds('diff'), cpd1)
self.assertEqual(self.graph.get_cpds('intel'), cpd2)
self.assertEqual(self.graph.get_cpds('grade'), cpd3)
def test_get_values_raises_error(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, 'sat')
def tearDown(self):
del self.graph
| mit |
aewhatley/scikit-learn | sklearn/svm/classes.py | 13 | 40017 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
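    Examples
    --------
    A minimal, illustrative fit on a toy dataset (for orientation only, not
    a benchmark):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVC
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = LinearSVC(random_state=0)
    >>> _ = clf.fit(X, y)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]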
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
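    Examples
    --------
    A minimal, illustrative fit on a toy dataset (for orientation only; the
    fitted coefficients depend on the solver and on regularization):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVR
    >>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
    >>> y = np.array([0.0, 1.0, 2.0, 3.0])
    >>> reg = LinearSVR(random_state=0)
    >>> _ = reg.fit(X, y)
    >>> reg.coef_.shape
    (1,)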
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
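    Examples
    --------
    A minimal, illustrative fit (the data values are arbitrary and the
    predicted labels shown are indicative only, hence the skipped doctest):
    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> X = np.array([[0.0], [0.44], [0.45], [0.46], [1.0]])
    >>> clf = OneClassSVM(nu=0.5).fit(X)
    >>> clf.predict(X)  # doctest: +SKIP
    array([-1,  1,  1,  1, -1])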
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
zuku1985/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
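# Note (illustrative): for data that truly does not fit in memory, the same
# estimator could be updated chunk-by-chunk via ipca.partial_fit(X_chunk);
# iris is small, so a single fit_transform suffices here.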
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
stimpsonsg/moose | modules/porous_flow/doc/tests/dirackernels.py | 10 | 7355 | #!/usr/bin/env python
import os
import sys
import numpy as np
from scipy.special import erf
import matplotlib.pyplot as plt
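# The *_expected functions below give the analytic values that the MOOSE
# results (read from the gold CSV files) are compared against.  The borehole
# flow rates follow a Peaceman-type expression,
#   flow = wc * density * relperm * (P - P_bottomhole) / viscosity,
# with well constant wc = 2*pi*sqrt(perm**2)*bh_length / ln(r0/radius); the
# relative-permeability factor enters only in the unsaturated cases (bh04, bh05).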
def bh02_expected(pressure):
perm = 1.0E-12
ele_length = 2
radius = 0.1
bh_length = 1
re = 0.28
r0 = re * np.sqrt(ele_length**2 + ele_length**2) / 2.0
wc = 2 * np.pi * np.sqrt(perm**2) * bh_length / np.log(r0 / radius)
density = 1000
viscosity = 1.0E-3
return wc * density * pressure / viscosity
def bh02():
f = open("../../tests/dirackernels/gold/bh02.csv")
data = [line.strip().split(",") for line in f.readlines()[1:]]
f.close()
data = [map(float, line) for line in data if len(line) > 5]
pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
return pfe
def bh03_expected(pressure):
perm = 1.0E-12
ele_length = 2
radius = 0.1
bh_length = 1
re = 0.28
r0 = re * np.sqrt(ele_length**2 + ele_length**2) / 2.0
wc = 2 * np.pi * np.sqrt(perm**2) * bh_length / np.log(r0 / radius)
density = 1000
viscosity = 1.0E-3
return wc * density * (pressure - 1E7) / viscosity
def bh03():
f = open("../../tests/dirackernels/gold/bh03.csv")
data = [line.strip().split(",") for line in f.readlines()[1:]]
f.close()
data = [map(float, line) for line in data if len(line) > 5]
pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
return pfe
def bh04_expected(pressure):
perm = 1.0E-12
ele_length = 2
radius = 0.1
bh_length = 1
re = 0.28
r0 = re * np.sqrt(ele_length**2 + ele_length**2) / 2.0
wc = 2 * np.pi * np.sqrt(perm**2) * bh_length / np.log(r0 / radius)
alpha = 1.0E-5
m = 0.8
n = 2.0
bottom_p = -1.0E6
bulk = 2.0E9
dens0 = 1000
viscosity = 1.0E-3
saturation = (1.0 + (- alpha * pressure)**(1.0 / (1.0 - m)))**(- m)
relperm = (n + 1.0) * saturation**n - n * saturation**(n + 1.0)
density = dens0 * np.exp(pressure / bulk)
return wc * density * relperm * (pressure - bottom_p) / viscosity
def bh04():
f = open("../../tests/dirackernels/gold/bh04.csv")
data = [line.strip().split(",") for line in f.readlines()[1:]]
f.close()
data = [map(float, line) for line in data if len(line) > 5]
pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
return pfe
def bh05_expected(pressure):
perm = 1.0E-12
ele_length = 2
radius = 0.1
bh_length = 1
re = 0.28
r0 = re * np.sqrt(ele_length**2 + ele_length**2) / 2.0
wc = 2 * np.pi * np.sqrt(perm**2) * bh_length / np.log(r0 / radius)
alpha = 1.0E-5
m = 0.8
n = 2.0
bottom_p = 0
bulk = 2.0E9
dens0 = 1000
viscosity = 1.0E-3
saturation = (1.0 + (- alpha * pressure)**(1.0 / (1.0 - m)))**(- m)
relperm = (n + 1.0) * saturation**n - n * saturation**(n + 1.0)
density = dens0 * np.exp(pressure / bulk)
return wc * density * relperm * (pressure - bottom_p) / viscosity
def bh05():
f = open("../../tests/dirackernels/gold/bh05.csv")
data = [line.strip().split(",") for line in f.readlines()[1:]]
f.close()
data = [map(float, line) for line in data if len(line) > 5]
pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
return pfe
def bh07_expected(r):
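    # Steady-state radial solution: fluid density varies logarithmically
    # between the borehole (P_bh) and the outer boundary (P_R); porepressure
    # is then recovered from the exponential equation of state,
    # P = bulk * ln(rho / dens0).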
dens0 = 1000.0
bulk = 2.0E9
P_bh = 0
rho_bh = dens0 * np.exp(P_bh / bulk)
P_R = 1.0E7
rho_R = dens0 * np.exp(P_R / bulk)
r_bh = 1.0
outer_r = 300
rho = rho_bh + (rho_R - rho_bh) * np.log(r / r_bh) / np.log(outer_r / r_bh)
return bulk * np.log(rho / dens0)
def bh07():
f = open("../../tests/dirackernels/gold/bh07_csv_pp_0003.csv")
data = [line.strip().split(",") for line in f.readlines()[1:]]
f.close()
data = [map(float, line) for line in data if len(line) > 3]
xp = [(data[i][2], data[i][1]) for i in range(0, len(data), 10)]
return xp
ppoints = np.arange(0, 1.01E7, 1E6)
bh02 = bh02()
plt.figure()
plt.plot(ppoints/1E6, bh02_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E6 for x in bh02], [x[1] for x in bh02], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Fully-saturated production well: flow")
plt.savefig("bh02_flow.pdf")
plt.figure()
plt.plot([x[0]/1E6 for x in bh02], [x[2]*1E15 for x in bh02], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("Mass-balance error (units 1E-15)")
plt.title("Fully-saturated production well: mass-balance error")
plt.savefig("bh02_error.pdf")
ppoints = np.arange(0, 1.01E7, 1E6)
bh03 = bh03()
plt.figure()
plt.plot(ppoints/1E6, bh03_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E6 for x in bh03], [x[1] for x in bh03], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Fully-saturated injection well: flow")
plt.savefig("bh03_flow.pdf")
plt.figure()
plt.plot([x[0]/1E6 for x in bh03], [x[2]*1E15 for x in bh03], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("Mass-balance error (units 1E-15)")
plt.title("Fully-saturated injection well: mass-balance error")
plt.savefig("bh03_error.pdf")
ppoints = np.arange(-2.0E5, 0, 1E3)
bh04 = bh04()
plt.figure()
plt.plot(ppoints/1E3, bh04_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E3 for x in bh04], [x[1] for x in bh04], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Unsaturated production well: flow")
plt.savefig("bh04_flow.pdf")
plt.figure()
plt.plot([x[0]/1E3 for x in bh04], [x[2]*1E13 for x in bh04], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("Mass-balance error (units 1E-13)")
plt.title("Unsaturated production well: mass-balance error")
plt.savefig("bh04_error.pdf")
ppoints = np.arange(-2.0E5, 0, 1E3)
bh05 = bh05()
plt.figure()
plt.plot(ppoints/1E3, bh05_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E3 for x in bh05], [x[1] for x in bh05], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Unsaturated injection well: flow")
plt.savefig("bh05_flow.pdf")
plt.figure()
plt.plot([x[0]/1E3 for x in bh05], [x[2]*1E10 for x in bh05], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("Mass-balance error (units 1E-10)")
plt.title("Unsaturated injection well: mass-balance error")
plt.savefig("bh05_error.pdf")
rpoints = np.arange(1, 301, 3)
bh07 = bh07()
plt.figure()
plt.plot(rpoints, bh07_expected(rpoints)/1E6, 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0] for x in bh07], [x[1]/1E6 for x in bh07], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("radius (m)")
plt.ylabel("Porepressure (MPa)")
plt.title("Steadystate porepressure distribution due to production borehole")
plt.savefig("bh07.pdf")
sys.exit(0)
| lgpl-2.1 |
chintak/scikit-image | doc/examples/plot_piecewise_affine.py | 2 | 1084 | """
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
out_rows = image.shape[0] - 1.5 * 50
out_cols = cols
out = warp(image, tform, output_shape=(out_rows, out_cols))
plt.imshow(out)
plt.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
plt.axis((0, out_cols, out_rows, 0))
plt.show()
| bsd-3-clause |
mikejbrown/HN_plotting | data_reader.py | 1 | 4620 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 21:16:23 2016
Data file has fields (* == non-numeric):
Patient
AgeatRecruit
Gender
*Diagnosis
*DxCode
Category
*T
*N
*Side
*Notes
Dose
Fraction
Chemo
Modality
Taste_B
Taste_W2
Taste_W4
Taste_W6
Taste_FU1
Taste_FU3
Taste_FU6
Taste_FU12
Taste_W4_diff
Taste_W6_diff
Taste_FU1_diff
Taste_FU3_diff
Taste_FU6_diff
Taste_FU12_diff
filter_$
Overall_QOL_B
Overall_QOL_W2
Overall_QOL_W4
Overall_QOL_W6
Overall_QOL_FU1
Overall_QOL_FU3
Overall_QOL_FU6
Overall_QOL_FU12
Missing data value:
99999
Category codes:
1 Hypopharynx
2 Nasopharynx
3 Oral
4 Oropharynx
5 Parotid
6 Skin
7 Larynx
Gender codes:
0 Male
1 Female
Chemo codes:
0 No chemo
1 Chemo
Modality codes:
1 3D
2 IMRT
@author: Michael
"""
import pandas as pd
import numpy as np
from common import get_data_file_path, output_analysis
from common import time_points_for_variable
def read_data(file_path):
"""
Reads the data from a csv file, appropriately munging null and missing
values and assigning category labels.
Returns: a Pandas dataframe object containing the cleaned data.
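    Illustrative usage (the path helper comes from this project's `common`
    module):
        >>> df = read_data(get_data_file_path())  # doctest: +SKIP
        >>> df['Category'].cat.categories  # doctest: +SKIP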
"""
non_numeric_fields = ['Diagnosis', 'DxCode', 'T', 'N', 'Side', 'Notes']
category_codes = ['Hypopharynx',
'Nasopharynx',
'Oral',
'Oropharynx',
'Parotid',
'Skin',
'Larynx']
data = pd.read_csv(file_path)
# Some columns need explicit type conversion to numerics because the csv
# file has spaces which cause the field to be mis-parsed.
# NOTE: pylint persistently complains about no member existing, because it
# is stupid about recognizing pandas objects. Explicitly ignore the linter
# checks here because _everything is fine_!
# pylint: disable=E1103
columns_which_need_munging = [c for c in data.columns
if data[c].dtype == np.dtype('O') and
c not in non_numeric_fields]
# pylint: enable=E1103
for col in columns_which_need_munging:
data[col] = pd.to_numeric(data[col], errors='coerce')
    # clean 99999 representing missing data
# NOTE: same comment as above about _stupid_ pylint
# pylint: disable=E1103
data = data.where(data != 99999)
# pylint: enable=E1103
# mark 'Category' as a categorical variable and label appropriately
data['Category'] = data['Category'].astype("category")
data['Category'].cat.categories = category_codes
data['Gender'] = data['Gender'].astype("category")
data['Gender'].cat.categories = ['Male', 'Female']
data['Chemo'] = data['Chemo'].astype("category")
data['Chemo'].cat.categories = ['No chemo', 'Chemo']
data['Modality'] = data['Modality'].astype("category")
data['Modality'].cat.categories = ['3D', 'IMRT']
data.set_index('Patient')
return data
if __name__ == "__main__":
DATA_FILE_PATH = get_data_file_path()
SAVE_ANALYSIS = True
DATA = read_data(DATA_FILE_PATH)
for cat in DATA.Category.sort_values().unique():
cat_data = DATA[DATA.Category == cat]
print("*** Category: % s ***" % cat)
print(cat_data[time_points_for_variable('Taste')].describe())
COLS = ['Category', ]
COLS.extend(time_points_for_variable('Taste'))
COLS.extend(time_points_for_variable('Overall_QOL'))
SUMMARY = DATA[COLS].groupby('Category').describe()
output_analysis(SUMMARY.to_csv(), 'summary-stats.csv', SAVE_ANALYSIS)
SUMMARY_HTML = """
<html>
<head>
<title>Summary statistics</title>
</head>
<body>
%s
</body>
</html>
""" % SUMMARY.to_html()
output_analysis(SUMMARY_HTML, 'summary-stats.html', SAVE_ANALYSIS)
REDUCE_DATA = DATA.groupby(['Category', 'T', 'N', 'Gender'])
SUMMARY2 = REDUCE_DATA.size().sort_values(ascending=False).unstack()
output_analysis(SUMMARY2.to_csv(),
'summary-stats-frequency-by-staging.csv',
SAVE_ANALYSIS)
SUMMARY_HTML = """
<html>
<head>
<title>Summary statistics by staging</title>
</head>
<body>
%s
</body>
</html>
""" % SUMMARY2.to_html()
output_analysis(SUMMARY_HTML,
'summary-stats-frequency-by-staging.html',
SAVE_ANALYSIS)
| mit |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/_surface.py | 1 | 98543 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Surface(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "surface"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"coloraxis",
"colorbar",
"colorscale",
"connectgaps",
"contours",
"customdata",
"customdatasrc",
"hidesurface",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"lighting",
"lightposition",
"meta",
"metasrc",
"name",
"opacity",
"opacityscale",
"reversescale",
"scene",
"showlegend",
"showscale",
"stream",
"surfacecolor",
"surfacecolorsrc",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"x",
"xcalendar",
"xhoverformat",
"xsrc",
"y",
"ycalendar",
"yhoverformat",
"ysrc",
"z",
"zcalendar",
"zhoverformat",
"zsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here z or surfacecolor) or the
bounds set in `cmin` and `cmax` Defaults to `false` when
`cmin` and `cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as z or surfacecolor and if set, `cmin` must be set
as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as z or surfacecolor. Has no effect when `cauto` is
`false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as z or surfacecolor and if set, `cmax` must be set
as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                    Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                    Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                    last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                    Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.surface
.colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.surface.colorbar.tickformatstopdefaults),
sets the default property values to use for
elements of surface.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.surface.colorbar.T
itle` instance or dict with compatible
properties
titlefont
Deprecated: Please use
surface.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's
font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use
surface.colorbar.title.side instead. Determines
the location of color bar's title with respect
to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                    Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.surface.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the `z` data are filled in.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# contours
# --------
@property
def contours(self):
"""
The 'contours' property is an instance of Contours
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Contours`
- A dict of string/value properties that will be passed
to the Contours constructor
Supported dict properties:
x
:class:`plotly.graph_objects.surface.contours.X
` instance or dict with compatible properties
y
:class:`plotly.graph_objects.surface.contours.Y
` instance or dict with compatible properties
z
:class:`plotly.graph_objects.surface.contours.Z
` instance or dict with compatible properties
Returns
-------
plotly.graph_objs.surface.Contours
"""
return self["contours"]
@contours.setter
def contours(self, val):
self["contours"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hidesurface
# -----------
@property
def hidesurface(self):
"""
Determines whether or not a surface is drawn. For example, set
        `hidesurface` to True, `contours.x.show` to True and
`contours.y.show` to True to draw a wire frame plot.
The 'hidesurface' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["hidesurface"]
@hidesurface.setter
def hidesurface(self, val):
self["hidesurface"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                    only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.surface.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
        Template string used for rendering the information that appears
        in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
        Additionally, all attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids provide object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.surface.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
        *reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# lighting
# --------
@property
def lighting(self):
"""
The 'lighting' property is an instance of Lighting
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Lighting`
- A dict of string/value properties that will be passed
to the Lighting constructor
Supported dict properties:
ambient
Ambient light increases overall color
visibility but can wash out the image.
diffuse
Represents the extent that incident rays are
reflected in a range of angles.
fresnel
Represents the reflectance as a dependency of
the viewing angle; e.g. paper is reflective
when viewing it from the edge of the paper
(almost 90 degrees), causing shine.
roughness
Alters specular reflection; the rougher the
surface, the wider and less contrasty the
shine.
specular
Represents the level that incident rays are
reflected in a single direction, causing shine.
Returns
-------
plotly.graph_objs.surface.Lighting
"""
return self["lighting"]
@lighting.setter
def lighting(self, val):
self["lighting"] = val
# lightposition
# -------------
@property
def lightposition(self):
"""
The 'lightposition' property is an instance of Lightposition
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Lightposition`
- A dict of string/value properties that will be passed
to the Lightposition constructor
Supported dict properties:
x
Numeric vector, representing the X coordinate
for each vertex.
y
Numeric vector, representing the Y coordinate
for each vertex.
z
Numeric vector, representing the Z coordinate
for each vertex.
Returns
-------
plotly.graph_objs.surface.Lightposition
"""
return self["lightposition"]
@lightposition.setter
def lightposition(self, val):
self["lightposition"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the surface. Please note that in the case
of using high `opacity` values for example a value greater than
or equal to 0.5 on two surfaces (and 0.25 with four surfaces),
an overlay of multiple transparent surfaces may not perfectly
be sorted in depth by the webgl API. This behavior may be
improved in the near future and is subject to change.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacityscale
# ------------
@property
def opacityscale(self):
"""
Sets the opacityscale. The opacityscale must be an array
containing arrays mapping a normalized value to an opacity
value. At minimum, a mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1], [0.5, 0.2], [1,
1]]` means that higher/lower values would have higher opacity
        values and those in the middle would be more transparent.
Alternatively, `opacityscale` may be a palette name string of
the following list: 'min', 'max', 'extremes' and 'uniform'. The
default is 'uniform'.
The 'opacityscale' property accepts values of any type
Returns
-------
Any
"""
return self["opacityscale"]
@opacityscale.setter
def opacityscale(self, val):
self["opacityscale"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `cmin` will
correspond to the last color in the array and `cmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# scene
# -----
@property
def scene(self):
"""
Sets a reference between this trace's 3D coordinate system and
a 3D scene. If "scene" (the default value), the (x,y,z)
coordinates refer to `layout.scene`. If "scene2", the (x,y,z)
coordinates refer to `layout.scene2`, and so on.
The 'scene' property is an identifier of a particular
subplot, of type 'scene', that may be specified as the string 'scene'
optionally followed by an integer >= 1
(e.g. 'scene', 'scene1', 'scene2', 'scene3', etc.)
Returns
-------
str
"""
return self["scene"]
@scene.setter
def scene(self, val):
self["scene"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.surface.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# surfacecolor
# ------------
@property
def surfacecolor(self):
"""
Sets the surface color values, used for setting a color scale
independent of `z`.
The 'surfacecolor' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["surfacecolor"]
@surfacecolor.setter
def surfacecolor(self, val):
self["surfacecolor"] = val
# surfacecolorsrc
# ---------------
@property
def surfacecolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
surfacecolor .
The 'surfacecolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["surfacecolorsrc"]
@surfacecolorsrc.setter
def surfacecolorsrc(self, val):
self["surfacecolorsrc"] = val
# text
# ----
@property
def text(self):
"""
Sets the text elements associated with each z value. If trace
`hoverinfo` contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
Sets the x coordinates.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xcalendar
# ---------
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
# xhoverformat
# ------------
@property
def xhoverformat(self):
"""
        Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the y coordinates.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# ycalendar
# ---------
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
# yhoverformat
# ------------
@property
def yhoverformat(self):
"""
        Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# z
# -
@property
def z(self):
"""
Sets the z coordinates.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zcalendar
# ---------
@property
def zcalendar(self):
"""
Sets the calendar system to use with `z` date data.
The 'zcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["zcalendar"]
@zcalendar.setter
def zcalendar(self, val):
self["zcalendar"] = val
# zhoverformat
# ------------
@property
def zhoverformat(self):
"""
        Sets the hover text formatting rule for `z` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `zaxis.hoverformat`.
The 'zhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["zhoverformat"]
@zhoverformat.setter
def zhoverformat(self, val):
self["zhoverformat"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here z or surfacecolor)
or the bounds set in `cmin` and `cmax` Defaults to
`false` when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as z or surfacecolor.
Has no effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar` instance
or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours` instance
or dict with compatible properties
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hidesurface
Determines whether or not a surface is drawn. For
            example, set `hidesurface` to True, `contours.x.show`
to True and `contours.y.show` to True to draw a wire
frame plot.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
lighting
:class:`plotly.graph_objects.surface.Lighting` instance
or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be an
array containing arrays mapping a normalized value to
an opacity value. At minimum, a mapping for the lowest
(0) and highest (1) values are required. For example,
`[[0, 1], [0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and those in
            the middle would be more transparent. Alternatively,
`opacityscale` may be a palette name string of the
following list: 'min', 'max', 'extremes' and 'uniform'.
The default is 'uniform'.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.surface.Stream` instance
or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting a color
scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud for
surfacecolor .
text
Sets the text elements associated with each z value. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
            Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
            Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date data.
zhoverformat
            Sets the hover text formatting rule for `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
coloraxis=None,
colorbar=None,
colorscale=None,
connectgaps=None,
contours=None,
customdata=None,
customdatasrc=None,
hidesurface=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
lighting=None,
lightposition=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
opacityscale=None,
reversescale=None,
scene=None,
showlegend=None,
showscale=None,
stream=None,
surfacecolor=None,
surfacecolorsrc=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xcalendar=None,
xhoverformat=None,
xsrc=None,
y=None,
ycalendar=None,
yhoverformat=None,
ysrc=None,
z=None,
zcalendar=None,
zhoverformat=None,
zsrc=None,
**kwargs
):
"""
Construct a new Surface object
        The data that describes the coordinates of the surface is set in
`z`. Data in `z` should be a 2D list. Coordinates in `x` and
`y` can either be 1D lists or 2D lists (e.g. to graph
parametric surfaces). If not provided in `x` and `y`, the x and
y coordinates are assumed to be linear starting at 0 with a
unit step. The color scale corresponds to the `z` values by
default. For custom color scales, use `surfacecolor` which
should be a 2D list, where its bounds can be controlled using
`cmin` and `cmax`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Surface`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here z or surfacecolor)
or the bounds set in `cmin` and `cmax` Defaults to
`false` when `cmin` and `cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`cmin` and/or `cmax` to be equidistant to this point.
Value should have the same units as z or surfacecolor.
Has no effect when `cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value should
have the same units as z or surfacecolor and if set,
`cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar` instance
or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours` instance
or dict with compatible properties
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hidesurface
Determines whether or not a surface is drawn. For
            example, set `hidesurface` to True, `contours.x.show`
to True and `contours.y.show` to True to draw a wire
frame plot.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
lighting
:class:`plotly.graph_objects.surface.Lighting` instance
or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposition`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the surface. Please note that in
the case of using high `opacity` values for example a
value greater than or equal to 0.5 on two surfaces (and
0.25 with four surfaces), an overlay of multiple
transparent surfaces may not perfectly be sorted in
depth by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be an
array containing arrays mapping a normalized value to
            an opacity value. At minimum, mappings for the lowest
            (0) and highest (1) values are required. For example,
`[[0, 1], [0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and those in
            the middle would be more transparent. Alternatively,
`opacityscale` may be a palette name string of the
following list: 'min', 'max', 'extremes' and 'uniform'.
The default is 'uniform'.
reversescale
Reverses the color mapping if true. If true, `cmin`
will correspond to the last color in the array and
`cmax` will correspond to the first color.
scene
Sets a reference between this trace's 3D coordinate
system and a 3D scene. If "scene" (the default value),
the (x,y,z) coordinates refer to `layout.scene`. If
"scene2", the (x,y,z) coordinates refer to
`layout.scene2`, and so on.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.surface.Stream` instance
or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting a color
scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud for
surfacecolor .
text
Sets the text elements associated with each z value. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
            Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
            tickformat "%H~%M~%S.%2f" would display *09~15~23.46*. By
default the values are formatted using
`xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
            Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
            tickformat "%H~%M~%S.%2f" would display *09~15~23.46*. By
default the values are formatted using
`yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date data.
zhoverformat
            Sets the hover text formatting rule for `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
            tickformat "%H~%M~%S.%2f" would display *09~15~23.46*. By
default the values are formatted using
`zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Surface
"""
super(Surface, self).__init__("surface")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Surface
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Surface`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("connectgaps", None)
_v = connectgaps if connectgaps is not None else _v
if _v is not None:
self["connectgaps"] = _v
_v = arg.pop("contours", None)
_v = contours if contours is not None else _v
if _v is not None:
self["contours"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("hidesurface", None)
_v = hidesurface if hidesurface is not None else _v
if _v is not None:
self["hidesurface"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("lighting", None)
_v = lighting if lighting is not None else _v
if _v is not None:
self["lighting"] = _v
_v = arg.pop("lightposition", None)
_v = lightposition if lightposition is not None else _v
if _v is not None:
self["lightposition"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("opacityscale", None)
_v = opacityscale if opacityscale is not None else _v
if _v is not None:
self["opacityscale"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("scene", None)
_v = scene if scene is not None else _v
if _v is not None:
self["scene"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("surfacecolor", None)
_v = surfacecolor if surfacecolor is not None else _v
if _v is not None:
self["surfacecolor"] = _v
_v = arg.pop("surfacecolorsrc", None)
_v = surfacecolorsrc if surfacecolorsrc is not None else _v
if _v is not None:
self["surfacecolorsrc"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("ycalendar", None)
_v = ycalendar if ycalendar is not None else _v
if _v is not None:
self["ycalendar"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zcalendar", None)
_v = zcalendar if zcalendar is not None else _v
if _v is not None:
self["zcalendar"] = _v
_v = arg.pop("zhoverformat", None)
_v = zhoverformat if zhoverformat is not None else _v
if _v is not None:
self["zhoverformat"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "surface"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
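# --- Illustrative usage sketch (not part of the generated module) -------------------
# A minimal example of how the constructor arguments documented above fit together:
# `z` supplies the surface heights, `hovertemplate` formats the hover box and
# `opacityscale` maps normalized values to opacities. The argument values are made up
# for illustration and the helper name `_example_surface` is not a plotly API.
def _example_surface():
    # To render the trace: plotly.graph_objects.Figure(data=[_example_surface()]).show()
    return Surface(
        z=[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
        hovertemplate="x: %{x}<br>y: %{y}<br>z: %{z:.2f}<extra></extra>",
        opacityscale=[[0, 1], [0.5, 0.2], [1, 1]],
        showscale=True,
    )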
| mit |
gingi99/research_dr | python/FPgrowth/fpgrowh.py | 1 | 15402 | # -*- coding:utf-8 -*-
# Usage : spark-submit ~~.py
# Usage : spark-submit --properties-file spark.conf ~~.py
from pyspark.context import SparkContext
from pyspark.mllib.fpm import FPGrowth
from pyspark import StorageLevel
from pyspark import SparkConf
import sys
import os
import pickle
import collections
import pandas as pd
import numpy as np
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule :
def __init__(self):
self.value = list()
self.consequent = list()
self.strength = float()
self.support = list()
self.support_v = float()
self.conf = float()
def setValue(self, values) :
self.value = values
def setConsequent(self, consequents) :
self.consequent = consequents
def setStrength(self, strength) :
self.strength = strength
def setSupport(self, supports) :
self.support = supports
def setSupportV(self, support_v):
self.support_v = support_v
def setConf(self, confidence) :
self.conf = confidence
def getValue(self) :
return(self.value)
def getConsequent(self) :
return(self.consequent)
def getStrength(self):
return(self.strength)
def getSupport(self) :
return(self.support)
def getSupportV(self) :
return(self.support_v)
def getSupportD(self) :
return(self.strength * len(self.value))
def getConf(self) :
return(self.conf)
def output(self) :
print("value:" + str(self.value))
print("consequent:" + str(self.consequent))
print("strength:" + str(self.strength))
print("support:" + str(self.support))
print("support_v:" + str(self.support_v))
print("conf:" + str(self.conf))
# ======================================================
# rules load and save
# ======================================================
def loadPickleRules(fullpath_filename) :
with open(fullpath_filename, mode='rb') as inputfile:
rules = pickle.load(inputfile)
return(rules)
def savePickleRules(rules, fullpath_filename) :
with open(fullpath_filename, mode='wb') as outfile:
pickle.dump(rules, outfile, pickle.HIGHEST_PROTOCOL)
# ======================================================
# Can the class be identified from the rules when only P attribute values are known?
# ======================================================
def getPerIdentifiedClass(rules, p) :
attribute_values = [rule.getValue() for rule in rules]
attribute_values = list(chain.from_iterable(attribute_values))
attribute_values = list(set(attribute_values))
combi_attribute_values = combinations(attribute_values,p)
count = 0
bunbo = 0
for combi in combi_attribute_values :
bunbo += 1
rules_target = []
for rule in rules :
matching_count = len(list(set(combi) & set(rule.getValue())))
if matching_count == len(list(combi)) :
rules_target.append(rule)
        # If rules_target is empty, exclude this combination from the evaluation
if len(rules_target) == 0:
bunbo -= 1
#
else :
consequents = [rule.getConsequent() for rule in rules_target]
if len(list(set(consequents))) == 1:
count += 1
if bunbo == 0:
ans = 0
else:
ans = (float(count) / float(bunbo))
return(ans)
# ======================================================
# Whether the rule explains (fully matches) the given object
# ======================================================
def isExplainRule(obj, rule) :
matching_count = len(list(set(obj) & set(rule.getValue())))
if matching_count == len(rule.getValue()) : return(True)
else : return(False)
# ======================================================
# Matching factor between the object and the rule
# ======================================================
def getMatchingFactor(obj, rule) :
matching_factor = len(list(set(obj) & set(rule.getValue())))
matching_factor = matching_factor / len(rule.getValue())
return(matching_factor)
# ======================================================
# Return the support P of the rule
# ======================================================
def getSupportP(obj, rule) :
matching_factor = getMatchingFactor(obj, rule)
return(rule.getSupportD() * matching_factor)
# ======================================================
# Predict the class of the target object from the rules
# ======================================================
def estimateClass(obj, rules) :
list_judge = [isExplainRule(obj, r) for r in rules]
    # If at least one rule matches
if any(list_judge) :
consequents = [rules[i].getConsequent() for i, judge in enumerate(list_judge) if judge]
        # If the matched rules infer exactly one class
if len(set(consequents)) == 1 :
return(consequents[0])
else :
rules_match = list(compress(rules,list_judge))
supportD = [r.getSupportD() for r in rules_match]
return(rules_match[supportD.index(max(supportD))].getConsequent())
    # If no rule matches obj, estimate with partially matching rules
else :
supportP = [getSupportP(obj, rule) for rule in rules]
return(rules[supportP.index(max(supportP))].getConsequent())
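# --- Illustrative sketch (not part of the original pipeline) ---------------------------
# How estimateClass resolves a prediction with the Rule objects defined above: when the
# matching rules disagree, the rule with the largest support_D (strength * rule length)
# wins. The attribute values and class labels below are made up for the example.
def _example_estimate():
    r1 = Rule()
    r1.setValue(['A1', 'B2'])
    r1.setConsequent('D1')
    r1.setStrength(0.30)  # support_D = 0.30 * 2 = 0.60
    r2 = Rule()
    r2.setValue(['A1'])
    r2.setConsequent('D2')
    r2.setStrength(0.50)  # support_D = 0.50 * 1 = 0.50
    obj = ['A1', 'B2', 'C3']
    # Both rules match obj but predict different classes, so the larger support_D wins.
    return estimateClass(obj, [r1, r2])  # -> 'D1'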
# ======================================================
# Rule extraction with FP-growth
# ======================================================
def getRulesByFPGrowth(FILENAME, iter1, iter2, classes, min_sup=0.1, min_conf=0.0, numPartitions=32, ratio=True) :
# read data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
data = sc.textFile(filepath)
print(filepath)
transactions = data.map(lambda line: line.strip().split(' '))
    # Determine the minimum support
nrow = sum(1 for line in open(filepath))
minSupport = float(min_sup) if ratio == True else float(min_sup) / float(nrow)
    # Define the model
model = FPGrowth.train(transactions, minSupport = minSupport, numPartitions=numPartitions)
    # Keep only the frequent itemsets that do not contain a class label
nocls_freq_item_sets = model.freqItemsets().filter(lambda fis: all(not x in fis.items for x in classes))
    # Keep the frequent itemsets that contain a class label and have at least two items
cls_freq_item_sets = model.freqItemsets().filter(lambda fis: any(x in fis.items for x in classes)).filter(lambda fis: len(fis.items) > 1).collect()
rules = []
#def getRule(cls_freq_item):
    # Take the itemset whose non-class part is identical and whose length differs by one
# cls_freq_item = cls_freq_item.first()
# nocls_freq_item = nocls_freq_item_sets.filter(lambda ifs : all(x in cls_freq_item.items for x in ifs.items)).filter(lambda fis: len(fis.items) == len(cls_freq_item.items) - 1).first()
#print(cls_freq_item)
#print(nocls_freq_item)
# conf = float(cls_freq_item.freq) / float(nocls_freq_item.freq)
# if conf >= min_conf:
# rule = Rule()
# rule.setValue(nocls_freq_item.items)
# cls = list(set(cls_freq_item.items) & set(nocls_freq_item.items))[0]
# rule.setConsequent(cls)
# rule.setSupport(cls_freq_item.freq)
# rule.setConf(conf)
# return(rule)
# else :
# return(None)
#
#rules = cls_freq_item_sets.foreach(getRule)
rules = []
print("item count :"+str(len(cls_freq_item_sets)))
for cls_freq_item in cls_freq_item_sets:
        # Take the itemset whose non-class part is identical and whose length differs by one
# nocls_freq_item = nocls_freq_item_sets.filter(lambda ifs : all(x in cls_freq_item.items for x in ifs.items)).filter(lambda fis: len(fis.items) == len(cls_freq_item.items) - 1).first()
#print(cls_freq_item)
# print(nocls_freq_item)
#for nocls_freq_item in nocls_freq_item_sets:
        # # Take the itemset whose non-class part is identical and whose length differs by one
# cls_freq_item = cls_freq_item_sets.filter(lambda fis: (all(x in fis.items for x in nocls_freq_item.items))).filter(lambda fis: len(fis.items) == len(nocls_freq_item.items) + 1).collect()
# if cls_freq_item:
# conf = float(cls_freq_item.freq) / float(nocls_freq_item.freq)
# if conf >= min_conf:
values = [x for x in cls_freq_item.items if not x in classes]
cls = [x for x in cls_freq_item.items if x in classes][0]
conf = 0.0
rule = Rule()
rule.setValue(values)
#cls = list(set(cls_freq_item.items) & set(nocls_freq_item.items))[0]
rule.setConsequent(cls)
rule.setStrength(cls_freq_item.freq)
rule.setConf(conf)
rules.append(rule)
return(rules)
    # Execute
#result = model.freqItemsets().collect()
# class
#rules = []
#for cls in classes:
# for fi1 in result:
# if cls in fi1.items:
# target = [x for x in fi1.items if x != cls]
# for fi2 in result:
# if collections.Counter(target) == collections.Counter(fi2.items):
# conf = float(fi1.freq) / float(fi2.freq)
# if conf >= min_conf:
# rule = Rule()
# rule.setValue(fi2.items)
# rule.setConsequent(cls)
# rule.setSupport(fi1.freq)
# rule.setConf(conf)
# rules.append(rule)
#return(rules)
# ======================================================
# Accuracy evaluation with LERS
# ======================================================
def predictByLERS(FILENAME, iter1, iter2, rules) :
# read test data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.txt'
decision_table_test = pd.read_csv(filepath, delimiter=' ', header=None)
decision_table_test = decision_table_test.dropna()
decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
decision_table_test = decision_table_test.drop(decision_table_test.columns[len(decision_table_test.columns)-1], axis=1)
decision_table_test = decision_table_test.values.tolist()
    # Predict with LERS
predictions = []
for obj in decision_table_test:
estimated_class = estimateClass(obj, rules)
predictions.append(estimated_class)
    # Compute the accuracy
accuracy = accuracy_score(decision_class, predictions)
print(accuracy)
return(accuracy)
# ======================================================
# FP-growth_LERS
# ======================================================
def FPGrowth_LERS(FILENAME, iter1, iter2, min_sup):
classes = ['D1', 'D2']
min_conf = 0.0
    # Rule extraction
fullpath_filename = DIR_UCI+'/'+FILENAME+'/FPGrowth/rules/'+'rules-'+str(min_sup)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
rules = loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else getRulesByFPGrowth(FILENAME, iter1, iter2, classes, min_sup, min_conf)
if not os.path.isfile(fullpath_filename): savePickleRules(rules, fullpath_filename)
# predict by LERS
acc = predictByLERS(FILENAME, iter1, iter2, rules)
# save
savepath = DIR_UCI+'/'+FILENAME+'/FPGrowth/FPGrowth_LERS.csv'
with open(savepath, "a") as f :
f.writelines('FPGrowth_LERS,{min_sup},{min_conf},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2,acc=acc,min_sup=min_sup,min_conf=min_conf)+"\n")
# ========================================
# Compute the mean and variance of a list
# ========================================
def getEvalMeanVar(result):
    ans = '{mean}±{std}'.format(mean=('%.3f' % round(np.mean(result),3)), std=('%.3f' % round(np.std(result),3)))
return(ans)
# ========================================
# Run in parallel with multiprocessing
# ========================================
def multi_main(proc, FILENAME, FUN, **kargs):
pool = Pool(proc)
results = []
multiargs = []
    # For FPGrowth_LERS
if FUN == FPGrowth_LERS :
min_sup_range = kargs['min_sup_range']
for iter1, iter2, min_sup in product(range(1,11), range(1,11), min_sup_range):
multiargs.append((FILENAME, iter1, iter2, min_sup))
print(multiargs)
results = pool.starmap(FUN, multiargs)
else :
print("I dont' know the function.")
return(results)
# ======================================================
# main
# ======================================================
if __name__ == "__main__":
    # Spark settings
#SparkContext.setSystemProperty('spark.executor.memory', '128g')
#SparkContext.setSystemProperty('spark.driver.memory', '128g')
#sc = SparkContext("local[4]", appName="Sample FP-growth")
#sc.setLogLevel("ERROR")
sc = SparkContext(conf=SparkConf())
    # Prepare the data
FILENAME = "adult_cleansing2"
#FILENAME = "default_cleansing"
#FILENAME = "german_credit_categorical"
    # Data indices
#iter1 = 6
#iter2 = 9
    # Set the class labels
classes = ['D1', 'D2']
    # Thresholds for support and confidence
min_sup = 0.1
min_sup_range = [0.05, 0.10, 0.15, 0.20, 0.25]
min_conf = 0.0
    # Where to save the results file
#savepath = DIR_UCI+'/'+FILENAME+'/FPGrowth_LERS.csv'
    # Rule extraction
# rules = getRulesByFPGrowth(FILENAME, iter1, iter2, classes, min_sup, min_conf, ratio=False)
# for rule in rules :
# print(rule.output())
# predict by LERS
# print(predictByLERS(FILENAME, iter1, iter2, rules))
# exit(0)
# identify
#p = 2
#print(getPerIdentifiedClass(rules, 2))
    # Run in parallel and evaluate on all the data
#proc = 32
#freeze_support()
#FUN = FPGrowth_LERS
#results = multi_main(proc, FILENAME, FUN, min_sup_range = min_sup_range)
#exit(0)
    # Run serially and evaluate on all the data
#SAVEPATH = DIR_UCI+'/'+FILENAME+'/FPGrowth/FPGrowth_LERS.csv'
for min_sup in min_sup_range:
for iter1 in range(1,2,1):
for iter2 in range(1,11,1):
print(str(min_sup),",",str(iter1),",",str(iter2))
FPGrowth_LERS(FILENAME, iter1, iter2, min_sup)
#rules = getRulesByFPGrowth(FILENAME, iter1, iter2, classes, min_sup, min_conf)
#acc = predictByLERS(FILENAME, iter1, iter2, rules)
# save
#with open(SAVEPATH, "a") as f :
# f.writelines('FPGrowth_LERS,{min_sup},{min_conf},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2,acc=acc,min_sup=min_sup,min_conf=min_conf)+"\n")
| mit |
AnishShah/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 24 | 5843 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
_MODULE_PATH = os.path.dirname(__file__)
_DEFAULT_DATA_FILE = os.path.join(_MODULE_PATH, "data/period_trend.csv")
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
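# A small helper sketch (not part of the original example; the name is illustrative): the
# first len(observed) entries of `mean` returned by train_and_predict are the in-sample
# means for the evaluation window, so a quick in-sample RMSE can be computed directly.
def in_sample_rmse(observed, mean):
  """Root mean squared error over the in-sample (evaluation) portion of the forecast."""
  return np.sqrt(np.mean((observed - mean[:len(observed)]) ** 2))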
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
input_filename = FLAGS.input_filename
if input_filename is None:
input_filename = _DEFAULT_DATA_FILE
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(input_filename))
make_plot("AR", *ar_train_and_predict(input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=False,
help="Input csv file (omit to use the data/period_trend.csv).")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
taroplus/spark | python/pyspark/serializers.py | 4 | 21331 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects, also configurable by SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class PythonEvalType(object):
NON_UDF = 0
SQL_BATCHED_UDF = 1
SQL_PANDAS_UDF = 2
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
if the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
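# Illustrative round-trip sketch (not used by Spark itself; the helper name is made up):
# FramedSerializer subclasses such as PickleSerializer (defined below) write each object
# as a 4-byte big-endian length followed by the serialized bytes, so a stream written to
# any file-like object can be read back with load_stream.
def _framed_roundtrip_example():
    import io
    buf = io.BytesIO()
    ser = PickleSerializer()
    ser.dump_stream([1, "two", {"three": 3}], buf)  # each item framed as (length, data)
    buf.seek(0)
    return list(ser.load_stream(buf))  # -> [1, 'two', {'three': 3}]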
class ArrowSerializer(FramedSerializer):
"""
Serializes bytes as Arrow data with the Arrow file format.
"""
def dumps(self, batch):
import pyarrow as pa
import io
sink = io.BytesIO()
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
return sink.getvalue()
def loads(self, obj):
import pyarrow as pa
reader = pa.RecordBatchFileReader(pa.BufferReader(obj))
return reader.read_all()
def __repr__(self):
return "ArrowSerializer"
def _create_batch(series):
import pyarrow as pa
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
# If a nullable integer series has been promoted to floating point with NaNs, need to cast
# NOTE: this is not necessary with Arrow >= 0.7
def cast_series(s, t):
if t is None or s.dtype == t.to_pandas_dtype():
return s
else:
return s.fillna(0).astype(t.to_pandas_dtype(), copy=False)
arrs = [pa.Array.from_pandas(cast_series(s, t), mask=s.isnull(), type=t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
table = pa.Table.from_batches([batch])
yield [c.to_pandas() for c in table.itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of list of pairs, split the list of values
which contain more than a certain number of objects to make them
have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
Choose the size of batch automatically based on the size of object
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip,
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hook namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple
# those created in other module can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| apache-2.0 |
dkushner/zipline | tests/modelling/test_modelling_algo.py | 9 | 7105 | """
Tests for Algorithms running the full FFC stack.
"""
from unittest import TestCase
from os.path import (
dirname,
join,
realpath,
)
from numpy import (
array,
full_like,
nan,
)
from numpy.testing import assert_almost_equal
from pandas import (
concat,
DataFrame,
DatetimeIndex,
Panel,
read_csv,
Series,
Timestamp,
)
from six import iteritems
from testfixtures import TempDirectory
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
# add_filter,
add_factor,
get_datetime,
)
from zipline.assets import AssetFinder
# from zipline.data.equities import USEquityPricing
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
DailyBarWriterFromCSVs,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
USEquityPricingLoader,
)
# from zipline.modelling.factor import CustomFactor
from zipline.modelling.factor.technical import VWAP
from zipline.utils.test_utils import (
make_simple_asset_info,
str_to_seconds,
)
from zipline.utils.tradingcalendar import trading_days
TEST_RESOURCE_PATH = join(
dirname(dirname(realpath(__file__))), # zipline_repo/tests
'resources',
'modelling_inputs',
)
def rolling_vwap(df, length):
"Simple rolling vwap implementation for testing"
closes = df['close'].values
volumes = df['volume'].values
product = closes * volumes
out = full_like(closes, nan)
for upper_bound in range(length, len(closes) + 1):
bounds = slice(upper_bound - length, upper_bound)
out[upper_bound - 1] = product[bounds].sum() / volumes[bounds].sum()
return Series(out, index=df.index)
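# Illustrative check (not part of the original tests; the data values are made up):
# rolling_vwap weights closes by volume over the trailing window, so with a window of 2
# the last value below is (2*10 + 4*30) / (10 + 30) = 3.5.
def _rolling_vwap_example():
    frame = DataFrame(
        {'close': [1.0, 2.0, 4.0], 'volume': [5.0, 10.0, 30.0]},
        index=[Timestamp('2014-01-02'), Timestamp('2014-01-03'), Timestamp('2014-01-06')],
    )
    return rolling_vwap(frame, 2)  # [nan, (1*5 + 2*10) / 15, (2*10 + 4*30) / 40]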
class FFCAlgorithmTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.AAPL = 1
cls.MSFT = 2
cls.BRK_A = 3
cls.assets = [cls.AAPL, cls.MSFT, cls.BRK_A]
asset_info = make_simple_asset_info(
cls.assets,
Timestamp('2014'),
Timestamp('2015'),
['AAPL', 'MSFT', 'BRK_A'],
)
cls.asset_finder = AssetFinder(asset_info)
cls.tempdir = tempdir = TempDirectory()
tempdir.create()
try:
cls.raw_data, cls.bar_reader = cls.create_bar_reader(tempdir)
cls.adj_reader = cls.create_adjustment_reader(tempdir)
cls.ffc_loader = USEquityPricingLoader(
cls.bar_reader, cls.adj_reader
)
except:
cls.tempdir.cleanup()
raise
cls.dates = cls.raw_data[cls.AAPL].index.tz_localize('UTC')
@classmethod
def create_bar_reader(cls, tempdir):
resources = {
cls.AAPL: join(TEST_RESOURCE_PATH, 'AAPL.csv'),
cls.MSFT: join(TEST_RESOURCE_PATH, 'MSFT.csv'),
cls.BRK_A: join(TEST_RESOURCE_PATH, 'BRK-A.csv'),
}
raw_data = {
asset: read_csv(path, parse_dates=['day']).set_index('day')
for asset, path in iteritems(resources)
}
# Add 'price' column as an alias because all kinds of stuff in zipline
# depends on it being present. :/
for frame in raw_data.values():
frame['price'] = frame['close']
writer = DailyBarWriterFromCSVs(resources)
data_path = tempdir.getpath('testdata.bcolz')
table = writer.write(data_path, trading_days, cls.assets)
return raw_data, BcolzDailyBarReader(table)
@classmethod
def create_adjustment_reader(cls, tempdir):
dbpath = tempdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(dbpath)
splits = DataFrame.from_records([
{
'effective_date': str_to_seconds('2014-06-09'),
'ratio': (1 / 7.0),
'sid': cls.AAPL,
}
])
mergers = dividends = DataFrame(
{
# Hackery to make the dtypes correct on an empty frame.
'effective_date': array([], dtype=int),
'ratio': array([], dtype=float),
'sid': array([], dtype=int),
},
index=DatetimeIndex([], tz='UTC'),
columns=['effective_date', 'ratio', 'sid'],
)
writer.write(splits, mergers, dividends)
return SQLiteAdjustmentReader(dbpath)
@classmethod
def tearDownClass(cls):
cls.tempdir.cleanup()
def make_source(self):
return Panel(self.raw_data).tz_localize('UTC', axis=1)
def test_handle_adjustment(self):
AAPL, MSFT, BRK_A = assets = self.AAPL, self.MSFT, self.BRK_A
raw_data = self.raw_data
adjusted_data = {k: v.copy() for k, v in iteritems(raw_data)}
AAPL_split_date = Timestamp("2014-06-09", tz='UTC')
split_loc = raw_data[AAPL].index.get_loc(AAPL_split_date)
# Our view of AAPL's history changes after the split.
ohlc = ['open', 'high', 'low', 'close']
adjusted_data[AAPL].ix[:split_loc, ohlc] /= 7.0
adjusted_data[AAPL].ix[:split_loc, ['volume']] *= 7.0
window_lengths = [1, 2, 5, 10]
# length -> asset -> expected vwap
vwaps = {length: {} for length in window_lengths}
vwap_keys = {}
for length in window_lengths:
vwap_keys[length] = "vwap_%d" % length
for asset in AAPL, MSFT, BRK_A:
raw = rolling_vwap(raw_data[asset], length)
adj = rolling_vwap(adjusted_data[asset], length)
vwaps[length][asset] = concat(
[
raw[:split_loc],
adj[split_loc:]
]
)
def initialize(context):
context.vwaps = []
for length, key in iteritems(vwap_keys):
context.vwaps.append(VWAP(window_length=length))
add_factor(context.vwaps[-1], name=key)
def handle_data(context, data):
today = get_datetime()
factors = data.factors
for length, key in iteritems(vwap_keys):
for asset in assets:
computed = factors.loc[asset, key]
expected = vwaps[length][asset].loc[today]
# Only having two places of precision here is a bit
# unfortunate.
assert_almost_equal(computed, expected, decimal=2)
# Do the same checks in before_trading_start
before_trading_start = handle_data
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
ffc_loader=self.ffc_loader,
asset_finder=self.asset_finder,
start=self.dates[max(window_lengths)],
end=self.dates[-1],
)
algo.run(
source=self.make_source(),
# Yes, I really do want to use the start and end dates I passed to
# TradingAlgorithm.
overwrite_sim_params=False,
)
| apache-2.0 |
glyg/peak_detection | examples/tifffile.py | 1 | 120523 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2013, Christoph Gohlke
# Copyright (c) 2008-2013, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
ImageJ, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG/CCITT compressed image data or EXIF/IPTC/GPS/XMP
meta-data is not implemented. Only primary info records are read for STK,
FluoView, and NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SEQ, GEL,
and OME-TIFF, are custom extensions defined by MetaMorph, Carl Zeiss
MicroImaging, Olympus, Media Cybernetics, Molecular Dynamics, and the Open
Microscopy Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.01.18
Requirements
------------
* `CPython 2.7, 3.2 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Matplotlib 1.2 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis, for a bug fix and some read_cz_lsm functions.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) TiffDecoder.java
http://rsbweb.nih.gov/ij/developer/source/ij/io/TiffDecoder.java.html
(8) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
Examples
--------
>>> data = numpy.random.rand(301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> assert numpy.all(image == data)
>>> tif = TiffFile('test.tif')
>>> images = tif.asarray()
>>> image0 = tif[0].asarray()
>>> for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
... if page.is_rgb: pass
... if page.is_palette:
... t = page.color_map
... if page.is_stk:
... t = page.mm_uic_tags.number_planes
... if page.is_lsm:
... t = page.cz_lsm_info
>>> tif.close()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import struct
import warnings
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as ElementTree
import numpy
__version__ = '2013.01.18'
__docformat__ = 'restructuredtext en'
__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence']
def imsave(filename, data, photometric=None, planarconfig=None,
resolution=None, description=None, software='tifffile.py',
byteorder=None, bigtiff=False):
"""Write image data to TIFF file.
Image data are written uncompressed in one stripe per plane.
Dimensions larger than 2 or 3 (depending on photometric mode and
planar configuration) are flattened and saved as separate pages.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image height,
width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
software : str
Name of the software used to create the image.
Saved with the first page only.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
bigtiff : bool
If True the BigTIFF format is used.
By default the standard TIFF format is used for data less than 2040 MB.
Examples
--------
>>> data = numpy.random.rand(10, 3, 301, 219)
>>> imsave('temp.tif', data)
"""
assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb'))
assert(planarconfig in (None, 'contig', 'planar'))
assert(byteorder in (None, '<', '>'))
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
data = numpy.asarray(data, dtype=byteorder + data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
if not bigtiff and data.size * data.dtype.itemsize < 2040 * 2 ** 20:
bigtiff = False
offset_size = 4
tag_size = 12
numtag_format = 'H'
offset_format = 'I'
val_format = '4s'
else:
bigtiff = True
offset_size = 8
tag_size = 20
numtag_format = 'Q'
offset_format = 'Q'
val_format = '8s'
# unify shape of data
samplesperpixel = 1
extrasamples = 0
if photometric is None:
if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)):
photometric = 'rgb'
else:
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if planarconfig is None:
planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig'
if planarconfig == 'contig':
if shape[-1] not in (3, 4):
raise ValueError("not a contiguous RGB(A) image")
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
if shape[-3] not in (3, 4):
raise ValueError("not a planar RGB(A) image")
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
if samplesperpixel == 4:
extrasamples = 1
elif planarconfig and len(shape) > 2:
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
data = data.reshape((-1, 1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else lambda x: bytes(x, 'ascii')
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = []
tag_data = []
def pack(fmt, *val):
return struct.pack(byteorder + fmt, *val)
def tag(name, dtype, number, value, offset=[0]):
# append tag binary string to tags list
# append (offset, value as binary string) to tag_data list
# increment offset by tag_size
if dtype == 's':
value = bytestr(value) + b'\0'
number = len(value)
value = (value, )
t = [pack('HH', tifftags[name], tifftypes[dtype]),
pack(offset_format, number)]
if len(dtype) > 1:
number *= int(dtype[:-1])
dtype = dtype[-1]
if number == 1:
if isinstance(value, (tuple, list)):
value = value[0]
t.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * number <= offset_size:
t.append(pack(val_format, pack(str(number) + dtype, *value)))
else:
t.append(pack(offset_format, 0))
tag_data.append((offset[0] + offset_size + 4,
pack(str(number) + dtype, *value)))
tags.append(b''.join(t))
offset[0] += tag_size
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if software:
tag('software', 's', 0, software)
if description:
tag('image_description', 's', 0, description)
elif shape != data_shape:
tag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)))
tag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"))
# write previous tags only once
writeonce = (len(tags), len(tag_data)) if shape[0] > 1 else None
tag('compression', 'H', 1, 1)
tag('orientation', 'H', 1, 1)
tag('image_width', 'I', 1, shape[-2])
tag('image_length', 'I', 1, shape[-3])
tag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
tag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
tag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
tag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
tag('planar_configuration', 'H', 1,
1 if planarconfig == 'contig' else 2)
tag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
tag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
tag('extra_samples', 'H', 1, 1) # alpha channel
else:
tag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
tag('x_resolution', '2I', 1, rational(resolution[0]))
tag('y_resolution', '2I', 1, rational(resolution[1]))
tag('resolution_unit', 'H', 1, 2)
tag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
tag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
# strip_offsets must be the last tag; will be updated later
tag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
fh = open(filename, 'wb')
seek = fh.seek
tell = fh.tell
def write(arg, *args):
fh.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for i in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write tags
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next ifd
# write extra tag data and update pointers
for off, dat in tag_data:
pos = tell()
seek(tag_offset + off)
write(offset_format, pos)
seek(pos)
write(dat)
# update strip_offsets
pos = tell()
if len(strip_byte_counts) == 1:
seek(ifd_offset - offset_size)
write(offset_format, pos)
else:
seek(pos - offset_size * shape[1])
strip_offset = pos
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
seek(pos)
# write data
data[i].tofile(fh) # if this fails, try update Python and numpy
fh.flush()
# remove tags that should be written only once
if writeonce:
tags = tags[writeonce[0]:]
d = writeonce[0] * tag_size
tag_data = [(o - d, v) for (o, v) in tag_data[writeonce[1]:]]
writeonce = None
fh.close()
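# Editor's sketch, not part of the original module: a minimal imsave call that
# exercises the options documented above.  The file name 'temp_rgb.tif' and the
# random data are made up for illustration; assumes a writable working
# directory.
def _example_imsave():
    data = numpy.random.rand(5, 301, 219, 3).astype('float32')  # 5 RGB pages
    imsave('temp_rgb.tif', data, photometric='rgb', planarconfig='contig',
           resolution=(72.0, 72.0), description='random RGB test stack')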
def imread(files, *args, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
Examples
--------
>>> im = imread('test.tif', 0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(*args, **kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(*args, **kwargs)
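# Editor's sketch, not part of the original module: selecting a subset of pages
# through imread's `key` argument; 'stack.tif' is a hypothetical multi-page
# file.
def _example_imread_pages():
    first_three = imread('stack.tif', key=slice(0, 3))  # pages 0, 1 and 2
    return first_three.shape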
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func')
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
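# Editor's sketch, not part of the original module: how lazyattr caches a
# value.  Because the descriptor defines no __set__, the value stored on the
# instance by setattr() shadows the descriptor, so the decorated function runs
# at most once per instance.
class _LazyAttrDemo(object):
    @lazyattr
    def expensive(self):
        return sum(range(1000))  # computed on first access only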
class TiffFile(object):
"""Read image and meta-data from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TIFFpages)
TIFF pages with compatible shapes and types.
All attributes are read-only.
Examples
--------
>>> tif = TiffFile('test.tif')
... try:
... images = tif.asarray()
... except Exception as e:
... print(e)
... finally:
... tif.close()
"""
def __init__(self, arg, name=None, multifile=False):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
name : str
Human readable label of open file.
multifile : bool
If True, series may include pages from multiple files.
"""
if isinstance(arg, basestring):
filename = os.path.abspath(arg)
self._fh = open(filename, 'rb')
else:
filename = str(name)
self._fh = arg
self._fh.seek(0, 2)
self._fsize = self._fh.tell()
self._fh.seek(0)
self.fname = os.path.basename(filename)
self.fpath = os.path.dirname(filename)
self._tiffs = {self.fname: self} # cache of TIFFfiles
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
try:
self._fromfile()
except Exception:
self._fh.close()
raise
def close(self):
"""Close open file handle(s)."""
        if not hasattr(self, '_tiffs'):
return
for tif in self._tiffs.values():
if tif._fh:
tif._fh.close()
tif._fh = None
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder + 'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder + 'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
series = []
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(self.pages[0].mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_lsm:
lsmi = self.pages[0].cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if self.pages[0].is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape) - 2:
cp *= shape[i]
i += 1
shape = shape[:i] + list(pages[0].shape)
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = self.pages[0].imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (numpy.prod(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(self.pages[0].shape)
axes.extend(self.pages[0].axes)
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_nih:
series = [Record(pages=self.pages,
shape=(len(self.pages),) + self.pages[0].shape,
axes='I' + self.pages[0].axes,
dtype=numpy.dtype(self.pages[0].dtype))]
elif self.pages[0].is_shaped:
shape = self.pages[0].tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(self.pages[0].dtype))]
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if not shape in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
return series
def asarray(self, key=None, series=None):
"""Return image data of multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if len(pages) == 1:
return pages[0].asarray()
elif self.is_nih:
result = numpy.vstack(p.asarray(colormapped=False,
squeeze=False) for p in pages)
if pages[0].is_palette:
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
if self.is_ome and any(p is None for p in pages):
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray())
result = numpy.vstack((p.asarray() if p else nopage)
for p in pages)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = ElementTree.XML(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._tiffs = {uuid: self}
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("not an OME-TIFF master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
axes = "".join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size' + ax]) for ax in axes)
size = numpy.prod(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First' + ax, 0)) for ax in axes[:-2]]
idx = numpy.ravel_multi_index(idx, shape[:-2])
for uuid in data:
if uuid.tag.endswith('UUID'):
if uuid.text not in self._tiffs:
if not self._multifile:
# abort reading multi file OME series
return []
fn = uuid.attrib['FileName']
try:
tf = TiffFile(os.path.join(self.fpath, fn))
except (IOError, ValueError):
warnings.warn("failed to read %s" % fn)
break
self._tiffs[uuid.text] = tf
pages = self._tiffs[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(ifds[0].dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i + 1, size)
record.axes = record.axes.replace(axis, axis + newaxis, 1)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self.fname.capitalize(),
format_size(self._fsize),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._tiffs) > 1:
result.append("%i files" % (len(self._tiffs)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
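# Editor's sketch, not part of the original module: a typical loop over the
# series detected by TiffFile; 'multipage.tif' is a hypothetical file name.
def _example_tifffile_series():
    with TiffFile('multipage.tif') as tif:
        return [(''.join(s.axes), tuple(s.shape)) for s in tif.series]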
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'P' plane, 'I' image series,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'F' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table if exists.
mm_uic_tags: Record(dict)
Consolidated MetaMorph mm_uic# tags, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and meta_data tags, if exists.
All attributes are read-only.
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent._fh
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
for _ in range(numtags):
tag = TiffTag(self.parent)
tags[tag.name] = tag
# read LSM info subrecords
if self.is_lsm:
pos = fh.tell()
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_' + name]
except KeyError:
continue
if not offset:
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_' + name, reader(fh, byteorder))
except ValueError:
pass
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
value = tag.value[:self.samples_per_pixel]
if any((v - value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v - value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if not 'photometric' in tags:
self.photometric = None
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if self.is_imagej:
# consolidate imagej meta data
adict = imagej_description(tags['image_description'].value)
try:
adict.update(imagej_meta_data(
tags['imagej_meta_data'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception:
pass
self.imagej_tags = Record(adict)
if not 'image_length' in self.tags or not 'image_width' in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self._shape = ()
self.shape = ()
self.axes = ''
elif self.is_stk:
            # consolidate mm_uic tags
planes = tags['mm_uic2'].count
self.mm_uic_tags = Record(tags['mm_uic2'].value)
for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'):
if key in tags:
self.mm_uic_tags.update(tags[key].value)
if self.planar_configuration == 'contig':
self._shape = (planes, 1, self.image_length,
self.image_width, self.samples_per_pixel)
self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4))
self.axes = 'PYXS'
else:
self._shape = (planes, self.samples_per_pixel,
self.image_length, self.image_width, 1)
self.shape = self._shape[:4]
self.axes = 'PSYX'
elif self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
# else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
self._shape = (1, 1, self.image_length, self.image_width, 1)
if self.color_map.shape[1] >= 2 ** self.bits_per_sample:
self.shape = (3, self.image_length, self.image_width)
self.axes = 'SYX'
else:
# LSM and FluoView
self.shape = (self.image_length, self.image_width)
self.axes = 'YX'
elif self.is_rgb or self.samples_per_pixel > 1:
if self.planar_configuration == 'contig':
self._shape = (1, 1, self.image_length, self.image_width,
self.samples_per_pixel)
self.shape = (self.image_length, self.image_width,
self.samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (1, self.samples_per_pixel, self.image_length,
self.image_width, 1)
self.shape = self._shape[1:-1]
self.axes = 'SYX'
if self.is_rgb and 'extra_samples' in self.tags:
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.planar_configuration == 'contig':
self.shape = self.shape[:2] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, self.image_length, self.image_width, 1)
self.shape = self._shape[2:4]
self.axes = 'YX'
if not self.compression and not 'strip_byte_counts' in tags:
self.strip_byte_counts = numpy.prod(self.shape) * (
self.bits_per_sample // 8)
def asarray(self, squeeze=True, colormapped=True, rgbonly=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any argument is False, the shape of the returned array might be
different from the page shape.
Parameters
----------
squeeze : bool
If True all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True color mapping is applied for palette-indexed images.
rgbonly : bool
If True return RGB(A) image without additional extra samples.
"""
fh = self.parent._fh
if not fh:
raise IOError("TIFF file is not open")
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
if ('ycbcr_subsampling' in self.tags
and self.tags['ycbcr_subsampling'].value not in (1, (1, 1))):
raise ValueError("YCbCr subsampling not supported")
tag = self.tags['sample_format']
if tag.count != 1 and any((i - tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
dtype = self._dtype
shape = self._shape
if not shape:
return None
image_width = self.image_width
image_length = self.image_length
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
shape = shape[:-3] + (tl * tile_length, tw * tile_width, shape[-1])
tile_shape = (tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
try:
offsets[0]
except TypeError:
offsets = (offsets, )
byte_counts = (byte_counts, )
if any(o < 2 for o in offsets):
raise ValueError("corrupted file")
if (not self.is_tiled and (self.is_stk or (not self.compression
and bits_per_sample in (8, 16, 32, 64)
and all(offsets[i] == offsets[i + 1] - byte_counts[i]
for i in range(len(offsets) - 1))))):
# contiguous data
fh.seek(offsets[0])
result = numpy_fromfile(fh, typecode, numpy.prod(shape))
result = result.astype('=' + dtype)
else:
if self.planar_configuration == 'contig':
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
unpack = lambda x: numpy.fromstring(x, typecode)
elif isinstance(bits_per_sample, tuple):
unpack = lambda x: unpackrgb(x, typecode, bits_per_sample)
else:
unpack = lambda x: unpackints(x, typecode, bits_per_sample,
runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, pl = 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, tl:tl + tile_length,
tw:tw + tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[-2]:
tw, tl = 0, tl + tile_length
if tl >= shape[-3]:
tl, pl = 0, pl + 1
result = result[..., :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = unpack(decompress(fh.read(bytecount)))
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index + size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not self.is_tiled:
# workaround bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2 ** bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map, result, axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.planar_configuration == 'contig':
result = result[..., [0, 1, 2, 3 + i]]
else:
result = result[:, [0, 1, 2, 3 + i]]
break
else:
if self.planar_configuration == 'contig':
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
return result
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
','.join(t[3:] for t in ('is_stk', 'is_lsm', 'is_nih', 'is_ome',
'is_imagej', 'is_fluoview', 'is_mdgel',
'is_mediacy', 'is_reduced', 'is_tiled')
if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return self.tags['photometric'].value == 2
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains MM_UIC2 tag."""
return 'mm_uic2' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return ('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ='))
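# Editor's sketch, not part of the original module: reading one page with and
# without color mapping applied; 'indexed.tif' is a hypothetical
# palette-indexed file.
def _example_page_asarray():
    with TiffFile('indexed.tif') as tif:
        page = tif[0]
        rgb = page.asarray()                   # color map applied
        idx = page.asarray(colormapped=False)  # raw palette indices
        return rgb.shape, idx.shape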
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
        Tag data. For codes in CUSTOM_TAGS, the raw 4 bytes of file content.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset')
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent._fh
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[dtype]
except KeyError:
raise ValueError("unknown TIFF tag data type %i" % dtype)
fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(
byteorder + tof, value)[0]
if offset < 4 or offset > parent._fsize:
raise ValueError("corrupt file - invalid tag value offset")
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
fh.seek(0, 2) # bug in numpy/Python 3.x ?
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(0, 2) # bug in numpy/Python 3.x ?
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if not code in CUSTOM_TAGS:
if len(value) == 1:
value = value[0]
if dtype.endswith('s'):
value = stripnull(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
Properties
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> ims = TiffSequence("test.oif.files/*.tif")
>>> ims = ims.asarray()
>>> ims.shape
(2, 100, 256, 256)
"""
_axes_pattern = """
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""
class _ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes'):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
# if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._axes_pattern if pattern == 'axes' else pattern
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self._ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = ((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
def asarray(self, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
Raise IndexError if image shapes don't match.
"""
im = self.imread(self.files[0])
result_shape = self.shape + im.shape
result = numpy.zeros(result_shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i - j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = result_shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self._ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self._ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self._ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self._ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i - j + 1 for i, j in zip(shape, start_index))
if numpy.prod(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
if k.startswith('_'): # does not work with byte
continue
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
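# Editor's sketch, not part of the original module: Record exposes dictionary
# entries as attributes, which is how tag and header fields are accessed
# throughout this module.  The field names below are made up.
def _example_record():
    r = Record(width=256, height=512)
    assert r.width == r['width'] == 256
    r.depth = 1  # attribute assignment stores a regular dict item
    return sorted(r.keys())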
class TiffTags(Record):
"""Dictionary of TIFFtags with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
# sortbycode = lambda a, b: cmp(a.code, b.code)
# for tag in sorted(self.values(), sortbycode):
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode,
str(tag.value).split('\n', 1)[0])
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
return numpy_fromfile(fh, byteorder + dtype[-1], count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
return numpy_fromfile(fh, byteorder + dtype[-1], count)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return numpy.rec.fromfile(fh, MM_HEADER, 1, byteorder=byteorder)[0]
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return numpy_fromfile(fh, byteorder + '8f8', 1)[0]
def read_mm_uic1(fh, byteorder, dtype, count):
"""Read MM_UIC1 tag from file and return as dictionary."""
t = fh.read(8 * count)
t = struct.unpack('%s%iI' % (byteorder, 2 * count), t)
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_mm_uic2(fh, byteorder, dtype, count):
"""Read MM_UIC2 tag from file and return as dictionary."""
result = {'number_planes': count}
values = numpy_fromfile(fh, byteorder + 'I', 6 * count)
result['z_distance'] = values[0::6] // values[1::6]
# result['date_created'] = tuple(values[2::6])
# result['time_created'] = tuple(values[3::6])
# result['date_modified'] = tuple(values[4::6])
# result['time_modified'] = tuple(values[5::6])
return result
def read_mm_uic3(fh, byteorder, dtype, count):
"""Read MM_UIC3 tag from file and return as dictionary."""
t = numpy_fromfile(fh, byteorder + 'I', 2 * count)
return {'wavelengths': t[0::2] // t[1::2]}
def read_mm_uic4(fh, byteorder, dtype, count):
"""Read MM_UIC4 tag from file and return as dictionary."""
t = struct.unpack(byteorder + 'hI' * count, fh.read(6 * count))
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
result = numpy.rec.fromfile(fh, CZ_LSM_INFO, 1,
byteorder=byteorder)[0]
{50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation
return result
def read_cz_lsm_time_stamps(fh, byteorder):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack(byteorder + 'II', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
return struct.unpack(('%s%dd' % (byteorder, count)),
fh.read(8 * count))
def read_cz_lsm_event_list(fh, byteorder):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack(byteorder + 'II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack(byteorder + 'IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh, byteorder):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack(byteorder + "I", fh.read(4))[0]:
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack(byteorder + "III", fh.read(12))
if dtype == 2:
value = stripnull(fh.read(size))
elif dtype == 4:
value = unpack(byteorder + "i", fh.read(4))[0]
elif dtype == 5:
value = unpack(byteorder + "d", fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
block = blocks.pop()
else:
setattr(block, "unknown_%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = numpy.rec.fromfile(fh, NIH_IMAGE_HEADER, 1, byteorder=byteorder)[0]
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def imagej_meta_data(data, bytecounts, byteorder):
"""Return dict from ImageJ meta data tag value."""
if sys.version_info[0] > 2:
_str = lambda x: str(x, 'cp1252')
else:
_str = str
def read_string(data, byteorder):
return _str(data[1::2])
def read_double(data, byteorder):
return struct.unpack(byteorder + ('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
# return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = {
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
if not bytecounts:
raise ValueError("no ImageJ meta data")
if not data.startswith(b'IJIJ'):
raise ValueError("invalid ImageJ meta data")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ meta data header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder + '4sI' * ntypes, data[4:4 + ntypes * 8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
if sys.version_info[0] > 2:
_str = lambda x: str(x, 'cp1252')
else:
_str = str
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
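# Editor's sketch, not part of the original module: parsing a hand-written
# ImageJ description; the key/value pairs below are made up but follow the
# one 'key=value' per line layout the parser expects.
def _example_imagej_description():
    desc = b'ImageJ=1.46\nimages=10\nslices=10\nloop=false\n'
    return imagej_description(desc)  # e.g. {'images': 10, 'loop': False, ...}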
def _replace_by(module_function, package=None, warn=True):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('Could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i + n])
i += n
elif n > 129:
result_extend(encoded[i:i + 1] * (258 - n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
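# Editor's sketch, not part of the original module: decoding a tiny hand-built
# PackBits stream.  Header byte 0xfe (-2 signed) repeats the following byte
# three times; header byte 0x02 copies the next three bytes literally.
def _example_decodepackbits():
    encoded = b'\xfe\xaa\x02\x80\x00\x2a'
    return decodepackbits(encoded)  # -> b'\xaa\xaa\xaa\x80\x00\x2a'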
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start + 4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00' * (4 - len(s)))[0]
code = code << (bitcount % 8)
code = code & mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9 * '1' + '0' * 23, 2)),
511: (10, 22, int(10 * '1' + '0' * 22, 2)),
1023: (11, 21, int(11 * '1' + '0' * 21, 2)),
2047: (12, 20, int(12 * '1' + '0' * 20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn(
"decodelzw encountered unexpected end of stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen * itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes * 8 - itemsize
bitmask = int(itemsize * '1' + '0' * shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data) * 8 // (runlen * itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start + itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00' * (itembytes - len(s)))[0]
code = code << (bitcount % 8)
code = code & bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i + 1) % runlen == 0:
bitcount += skipbits
return result
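# Illustrative sketch (not part of the original tifffile module): unpacking two bytes of
# 4-bit unsigned integers. 0x12 0x34 holds the nibbles 1, 2, 3, 4, so the call below
# should return array([1, 2, 3, 4], dtype=uint8).
def _example_unpackints():
    return unpackints(b'\x12\x34', 'B', itemsize=4)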
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize * 8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize * 8 >= bits)
data = numpy.fromstring(data, dtype.byteorder + dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i + 1:]))
t &= int('0b' + '1' * bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2 ** o - 1) // (2 ** bps - 1)
t //= 2 ** (o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
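# Illustrative sketch (not part of the original tifffile module): reorient() on a tiny
# 1x2x2x1 array (plane, length, width, sample). 'bottom_right' flips both image axes,
# so the corner values 0 and 3 swap places.
def _example_reorient():
    image = numpy.arange(4).reshape(1, 2, 2, 1)
    return reorient(image, 'bottom_right')  # result[0, 0, 0, 0] == 3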
def numpy_fromfile(arg, dtype=float, count=-1, sep=''):
"""Return array from data in binary file.
Work around numpy issue #2230, "numpy.fromfile does not accept StringIO
object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(arg, dtype, count, sep)
except IOError:
if count < 0:
size = 2 ** 30
else:
size = count * numpy.dtype(dtype).itemsize
data = arg.read(int(size))
return numpy.fromstring(data, dtype, count, sep)
def stripnull(string):
"""Return string truncated at first null character."""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
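# Illustrative sketch (not part of the original tifffile module): format_size() keeps
# dividing by 1024 until the value drops below 2048, so 3 * 1024**2 bytes reports as MB.
def _example_format_size():
    return format_size(3 * 1024 ** 2)  # expected: '3 MB'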
def natural_sorted(iterable):
"""Return human sorted list of strings.
Examples
--------
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
    numbers = re.compile(r'(\d+)')
sortkey = lambda x: [(int(c) if c.isdigit() else c)
for c in re.split(numbers, x)]
return sorted(iterable, key=sortkey)
def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Examples
--------
>>> datetime_from_timestamp(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(n)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory. Print error message on failure.
Examples
--------
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time() - t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful + failed, time.time() - start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
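# Illustrative sketch (not part of the original tifffile module): TIFF_SUBFILE_TYPES
# decodes the NewSubfileType bit flags, e.g. value 3 has bits 1 and 2 set.
def _example_tiff_subfile_types():
    return TIFF_SUBFILE_TYPES()[3]  # expected: ('reduced_image', 'page')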
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated',
6: 'cielab',
7: 'icclab',
8: 'itulab',
32844: 'logl',
32845: 'logluv',
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1B', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
# 14: '', # UNICODE
# 15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
# 4: 'void',
# 5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'P': 'plane', # page
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'F': 'phase',
'R': 'tile', # region, point
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'),
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'),
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'),
('lutmode_t', 'u1'),
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'),
]
# NIH_COLORTABLE_TYPE = (
# 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
# 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
# NIH_LUTMODE_TYPE = (
# 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
# 'ColorLut', 'CustomGrayscale')
# NIH_CURVEFIT_TYPE = (
# 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
# 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
# 'UncalibratedOD')
# NIH_UNITS_TYPE = (
# 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
# 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
# NIH_STACKTYPE_TYPE = (
# 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# MetaMorph STK tags
MM_TAG_IDS = {
0: 'auto_scale',
1: 'min_scale',
2: 'max_scale',
3: 'spatial_calibration',
# 4: 'x_calibration',
# 5: 'y_calibration',
# 6: 'calibration_units',
# 7: 'name',
8: 'thresh_state',
9: 'thresh_state_red',
11: 'thresh_state_green',
12: 'thresh_state_blue',
13: 'thresh_state_lo',
14: 'thresh_state_hi',
15: 'zoom',
# 16: 'create_time',
# 17: 'last_saved_time',
18: 'current_buffer',
19: 'gray_fit',
20: 'gray_point_count',
# 21: 'gray_x',
# 22: 'gray_y',
# 23: 'gray_min',
# 24: 'gray_max',
# 25: 'gray_unit_name',
26: 'standard_lut',
27: 'wavelength',
# 28: 'stage_position',
# 29: 'camera_chip_offset',
# 30: 'overlay_mask',
# 31: 'overlay_compress',
# 32: 'overlay',
# 33: 'special_overlay_mask',
# 34: 'special_overlay_compress',
# 35: 'special_overlay',
36: 'image_property',
# 37: 'stage_label',
# 38: 'autoscale_lo_info',
# 39: 'autoscale_hi_info',
# 40: 'absolute_z',
# 41: 'absolute_z_valid',
# 42: 'gamma',
# 43: 'gamma_red',
# 44: 'gamma_green',
# 45: 'gamma_blue',
# 46: 'camera_bin',
47: 'new_lut',
# 48: 'image_property_ex',
49: 'plane_property',
# 50: 'user_lut_table',
51: 'red_autoscale_info',
# 52: 'red_autoscale_lo_info',
# 53: 'red_autoscale_hi_info',
54: 'red_minscale_info',
55: 'red_maxscale_info',
56: 'green_autoscale_info',
# 57: 'green_autoscale_lo_info',
# 58: 'green_autoscale_hi_info',
59: 'green_minscale_info',
60: 'green_maxscale_info',
61: 'blue_autoscale_info',
# 62: 'blue_autoscale_lo_info',
# 63: 'blue_autoscale_hi_info',
64: 'blue_min_scale_info',
65: 'blue_max_scale_info',
# 66: 'overlay_plane_color'
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'i4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('dimension_data_type', 'i4'),
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('data_type', 'u4'),
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_information', 'u4'),
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
]
# Import functions for LSM_INFO subrecords
CZ_LSM_INFO_READERS = {
'scan_information': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Descriptions of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
2: '12 bit unsigned integer',
5: '32 bit float',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detectionchannels",
0x80000000: "illuminationchannels",
0xa0000000: "beamsplitters",
0xc0000000: "datachannels",
0x13000000: "markers",
0x11000000: "timers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
0x40000000: "tracks",
0x50000000: "lasers",
0x70000000: "detectionchannels",
0x90000000: "illuminationchannels",
0xb0000000: "beamsplitters",
0xd0000000: "datachannels",
0x14000000: "markers",
0x12000000: "timers",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "oledb_recording_scan_type",
0x10000008: "oledb_recording_scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bccorrection",
0x10000049: "position_bccorrection1",
0x10000050: "position_bccorrection2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
# lasers
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# tracks
0x40000001: "multiplex_type",
0x40000002: "multiplex_order",
0x40000003: "sampling_mode",
0x40000004: "sampling_method",
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# detection_channels
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "detection_channel_name",
0x70000015: "detection_detector_gain_bc1",
0x70000016: "detection_detector_gain_bc2",
0x70000017: "detection_amplifier_gain_bc1",
0x70000018: "detection_amplifier_gain_bc2",
0x70000019: "detection_amplifier_offset_bc1",
0x70000020: "detection_amplifier_offset_bc2",
0x70000021: "detection_spectral_scan_channels",
0x70000022: "detection_spi_wavelength_start",
0x70000023: "detection_spi_wavelength_stop",
0x70000026: "detection_dye_name",
0x70000027: "detection_dye_folder",
# illumination_channels
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitters
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channels
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# markers
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
# timers
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2 ** 32 - 1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
347: ('jpeg_tables', None, None, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
37510: ('user_comment', None, None, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50838: ('imagej_byte_counts', None, None, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_numpy),
33628: ('mm_uic1', read_mm_uic1),
33629: ('mm_uic2', read_mm_uic2),
33630: ('mm_uic3', read_mm_uic3),
33631: ('mm_uic4', read_mm_uic4),
34361: ('mm_header', read_mm_header),
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info),
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_meta_data', read_bytes),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
# if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if (isrgb and data.shape[-3] in (3, 4)):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif (not isrgb and data.shape[-1] in (3, 4)):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette':
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2 ** bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03 * (dims + 2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ub' and vmin == 0:
cmap = 'gray'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03 * (axis + 1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis] - 1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape) - 1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape) - 1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description=search_doc("\n\n([^|]*?)\n\n", ''),
version="%%prog %s" % search_doc(":Version: (.*)", "Unknown"))
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the internal tests")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
# if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time() - start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
notnone = lambda x: next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time() - start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_information', 'mm_uic_tags',
'mm_header', 'imagej_tags', 'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
if page.is_stk:
try:
vmin = page.mm_uic_tags['min_scale']
vmax = page.mm_uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str
unicode = str
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
bbfamily/abu | abupy/IndicatorBu/ABuNDMa.py | 1 | 7883 | # -*- encoding:utf-8 -*-
"""
Moving Average (MA): the term originally just means a moving average; because it is drawn as a line
it is commonly called the moving average line, or simply the MA line.
It is computed by summing the closing prices over a period and dividing by that period; for example,
the daily MA5 is the sum of the closing prices of the last 5 days divided by 5.
The moving average was introduced by the well-known American investment expert Joseph E. Granville
in the middle of the 20th century.
MA theory is one of the most widely used technical indicators today; it helps traders confirm the
current trend, judge trends that are about to emerge, and spot overextended trends that are about to reverse.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from collections import Iterable
import matplotlib.pyplot as plt
import pandas as pd
from enum import Enum
from .ABuNDBase import plot_from_order, g_calc_type, ECalcType
from ..CoreBu.ABuPdHelper import pd_rolling_mean, pd_ewm_mean
from ..CoreBu.ABuFixes import six
from ..UtilBu.ABuDTUtil import catch_error
__author__ = '阿布'
__weixin__ = 'abu_quant'
class EMACalcType(Enum):
"""计算移动移动平均使用的方法"""
"""简单移动平均线"""
E_MA_MA = 0
"""加权移动平均线"""
E_MA_EMA = 1
# noinspection PyUnresolvedReferences
def _calc_ma_from_ta(prices, time_period=10, from_calc=EMACalcType.E_MA_MA):
"""
    Calculate the MA with talib, i.e. pass through the result of talib.MA or talib.EMA
    :param prices: closing price sequence, pd.Series or np.array
    :param time_period: N value of the moving average, int
    :param from_calc: EMACalcType enum object, method used to calculate the moving average
"""
import talib
if isinstance(prices, pd.Series):
prices = prices.values
if from_calc == EMACalcType.E_MA_MA:
ma = talib.MA(prices, timeperiod=time_period)
else:
ma = talib.EMA(prices, timeperiod=time_period)
return ma
def _calc_ma_from_pd(prices, time_period=10, from_calc=EMACalcType.E_MA_MA):
"""
    Calculate the MA or EMA with pandas
    :param prices: closing price sequence, pd.Series or np.array
    :param time_period: N value of the moving average, int
    :param from_calc: EMACalcType enum object, method used to calculate the moving average
"""
if isinstance(prices, pd.Series):
prices = prices.values
if from_calc == EMACalcType.E_MA_MA:
ma = pd_rolling_mean(prices, window=time_period, min_periods=time_period)
else:
ma = pd_ewm_mean(prices, span=time_period, min_periods=time_period)
return ma
def calc_ma_from_prices(prices, time_period=10, min_periods=None, from_calc=EMACalcType.E_MA_MA):
"""
    Calculate the MA or EMA with pandas, with an added min_periods parameter
    :param prices: closing price sequence, pd.Series or np.array
    :param time_period: N value of the moving average, int
    :param min_periods: int, defaults to time_period when None
    :param from_calc: EMACalcType enum object, method used to calculate the moving average
"""
if isinstance(prices, pd.Series):
prices = prices.values
min_periods = time_period if min_periods is None else min_periods
if from_calc == EMACalcType.E_MA_MA:
ma = pd_rolling_mean(prices, window=time_period, min_periods=min_periods)
else:
ma = pd_ewm_mean(prices, span=time_period, min_periods=min_periods)
return ma
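# Illustrative sketch (assumption: this helper is not part of the original abupy source):
# computing a 5-period simple moving average with calc_ma_from_prices on a synthetic
# price series. With min_periods=1 the leading values are partial-window means instead
# of NaN; the exact return type (ndarray vs Series) follows abupy's pd_rolling_mean helper.
def _example_calc_ma_from_prices():
    prices = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0, 15.0])
    ma5 = calc_ma_from_prices(prices, time_period=5, min_periods=1,
                              from_calc=EMACalcType.E_MA_MA)
    return ma5  # the last value should be the mean of 11..15, i.e. 13.0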
"""通过在ABuNDBase中尝试import talib来统一确定指标计算方式, 外部计算只应该使用calc_ma"""
calc_ma = _calc_ma_from_pd if g_calc_type == ECalcType.E_FROM_PD else _calc_ma_from_ta
def plot_ma_from_order(order, date_ext=120, **kwargs):
"""
    Wrap plot_from_order from ABuNDBase together with this module's indicator-plotting function to
    visualize the technical indicator and mark the buy/sell points
    :param order: pd.DataFrame or pd.Series converted from an AbuOrder object
    :param date_ext: int, e.g. if the trade was executed on 2015-06-01 and date_ext=120, the start is pushed 120 days earlier and the end 120 days later
    :param kwargs: other keyword arguments needed to plot the indicator: time_period, from_calc, with_price, ultimately passed through to plot_ma
"""
return plot_from_order(plot_ma_from_klpd, order, date_ext, **kwargs)
def plot_ma_from_klpd(kl_pd, with_points=None, with_points_ext=None, **kwargs):
"""
    Wrap plot_ma to draw the closing price together with multiple moving average lines
    :param kl_pd: financial time series, pd.DataFrame object
    :param with_points: normally used to pass the buy orders, e.g. with_points=buy_index=pd.to_datetime(orders['buy_date'])
    :param with_points_ext: normally used to pass the sell orders, e.g. with_points_ext=sell_index=pd.to_datetime(orders['sell_date'])
    :param kwargs: other keyword arguments needed to plot the indicator: time_period, from_calc, with_price, ultimately passed through to plot_ma
"""
    # if the caller does not specify MA periods, the pop default here is [30, 60, 90]
time_period = kwargs.pop('time_period', [30, 60, 90])
plot_ma(kl_pd.close, kl_pd.index, time_period, with_points=with_points,
with_points_ext=with_points_ext)
def plot_ma(prices, kl_index, time_period, from_calc=EMACalcType.E_MA_MA,
with_points=None, with_points_ext=None, with_price=True):
"""
    Draw the price curve and multiple moving average lines on one figure; if with_points markers are given, annotate them with vertical lines
    :param prices: closing price sequence, pd.Series or np.array
    :param kl_index: pd.Index time series
    :param time_period: note this must be an Iterable; even a single period must be wrapped, e.g. [10]
    :param from_calc: EMACalcType enum object, defaults to the simple moving average
    :param with_points: normally used to pass the buy orders, e.g. with_points=buy_index=pd.to_datetime(orders['buy_date'])
    :param with_points_ext: normally used to pass the sell orders, e.g. with_points_ext=sell_index=pd.to_datetime(orders['sell_date'])
    :param with_price: whether to plot the price as well
:return:
"""
    # TODO: move the Iterable / six.string_types check into a shared module and reuse it as the Iterable test
if not isinstance(time_period, Iterable) or isinstance(time_period, six.string_types):
raise TypeError('MA CALC time_period MUST PASS Iterable!!!')
calc_type_func = calc_ma
    # iterate to compute multiple moving average lines, using the method selected by from_calc
ma_array = [calc_type_func(prices, period, from_calc) for period in time_period]
plt.figure(figsize=[14, 7])
for ind, ma in enumerate(ma_array):
        # ind indexes the original time_period to get the name needed for the label
# noinspection PyUnresolvedReferences
plt.plot(kl_index, ma, label='ma {}'.format(time_period[ind]))
if with_price:
plt.plot(kl_index, prices, label='prices')
@catch_error(return_val=None, log=False)
def plot_with_point(points, co, cc):
"""
        Annotate the points with a circle marker plus a vertical line
        :param points: sequence of point coordinates
        :param co: marker color, e.g. 'go' 'ro'
        :param cc: markeredgecolor and axvline color, e.g. 'green' 'red'
"""
v_index_num = kl_index.tolist().index(points)
        # if there are MA lines, draw the marker's y on the first MA line, otherwise on the price curve
y_array = ma_array[0] if len(ma_array) > 0 else prices
plt.plot(points, y_array[v_index_num], co, markersize=12, markeredgewidth=3.0,
markerfacecolor='None', markeredgecolor=cc)
plt.axvline(points, color=cc)
    # annotate the with_points and with_points_ext positions with vertical lines
if with_points is not None:
# plt.axvline(with_points, color='green', linestyle='--')
plot_with_point(with_points, 'go', 'green')
if with_points_ext is not None:
# plt.axvline(with_points_ext, color='red')
plot_with_point(with_points_ext, 'ro', 'red')
plt.grid(True)
plt.legend(loc='best')
plt.show()
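# Illustrative sketch (assumption: this helper is not part of the original abupy source):
# plotting a synthetic closing-price series together with its 5- and 10-period simple
# moving averages. Calling it opens a matplotlib window via plt.show() inside plot_ma.
def _example_plot_ma():
    kl_index = pd.date_range('2016-01-01', periods=60)
    prices = pd.Series([10.0 + 0.1 * i for i in range(60)], index=kl_index)
    plot_ma(prices, kl_index, time_period=[5, 10], from_calc=EMACalcType.E_MA_MA)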
| gpl-3.0 |
puruckertom/ubertool | ubertool/ted/ted_aggregate_methods.py | 1 | 46537 | from __future__ import division # brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
from numpy import math
import pandas as pd
class TedAggregateMethods(object):
"""
    Aggregate method class for the TED model (logical organization of functions for a collective purpose).
"""
def __init__(self):
"""Class representing the functions for Trex"""
super(TedAggregateMethods, self).__init__()
def set_global_constants(self):
# Assigned constants
self.num_simulations = len(self.chemical_name)
self.num_simulation_days = 366
self.day_num = np.arange(366) # create array of day numbers from 0 - 365
self.max_distance_from_source = 1000. # max distance (m) from source for distance calculations
self.num_ts = 11 # number of food item daily time series to be processed to determine number of exceedances of EECs
self.num_tox = 13 # number of toxicity measures to be processed to determine number of exceedances of EECs
# constants and conversions
self.density_h2o = 1. # kg/L
self.stan_temp_kelvin = 298. # temperature in Kelvin for 25degC
self.gas_const = 8.205e-5 # universal gas constant (atm-m3/mol-K)
self.hectare_area = 10000. # area of hectare (m2)
self.lbs_to_gms = 453.592
self.hectare_to_acre = 2.47105
self.gms_to_mg = 1000.
self.m3_to_liters = 1000.
self.mg_to_ug = 1000.
self.minutes_per_hr = 60.
self.unitless_henry_law = self.henry_law_const / (self.gas_const * self.stan_temp_kelvin)
self.log_unitless_hlc = np.log10(self.unitless_henry_law)
# initial residue concentration multiplier (upper bound)
self.food_multiplier_upper_sg = 240. # short grass
self.food_multiplier_upper_tg = 110. # tall grass
self.food_multiplier_upper_blp = 135. # broad-leafed plants
self.food_multiplier_upper_fp = 15. # fruits/pods
self.food_multiplier_upper_arthro = 94. # arthropods
# mean residue concentration multiplier (mean)
self.food_multiplier_mean_sg = 85. # short grass
self.food_multiplier_mean_tg = 36. # tall grass
self.food_multiplier_mean_blp = 45. # broad-leafed plants
self.food_multiplier_mean_fp = 7. # fruits/pods
self.food_multiplier_mean_arthro = 65. # arthropods
# soil properties
self.soil_depth = 2.6 # cm
self.soil_foc = 0.015
self.app_rate_conv1 = 11.2 # conversion factor used to convert units of application rate (lbs a.i./acre) to metric units to derive concentration in units of (ug a.i./mL); assuming depth/height in units of centimeters
self.app_rate_conv2 = 0.112 # conversion factor used to convert units of application rate (lbs a.i./acre) to metric units to derive concentration in units of (ug a.i./mL); assuming depth/height in units of meters
self.soil_particle_density = 2.65 # kg/L
self.soil_bulk_density = 1.5 # kg/L
self.soil_porosity = (1. - (self.soil_bulk_density / self.soil_particle_density))
self.h2o_depth_puddles = 1.3 # centimeters; for water depth in Eq.3
self.h2o_depth_soil = 0.0 # centimeters; for water depth in Eq.3
# concentration levels in water (ug/L) used in calculating aquatic invertebrate and fish tissue concentrations
self.aq_conc_level1 = 0.01
self.aq_conc_level2 = 0.1
self.aq_conc_level3 = 1.0
self.aq_conc_level4 = 10.
self.aq_conc_level5 = 100.
# earthworm properties
self.lipid_earthworm = 0.01 # lipid content of earthworm
self.density_earthworm = 1.0 # assumed equivalent to water
# broad leaf plant properties (for determining pesticide concentration in dew)
self.frac_pest_on_surface = 0.62 # from Eq 11; accounts for the amount of pesticide that is present on leaf surface and thus may partition between waxy layer and dew
self.mass_wax = 0.012 # central tendency of mass of wax on broadleaf plants kg/m2
self.crop_hgt = 1. # default crop height (m) for use in Eq.26
self.mass_plant = 25000. # mass of plant (crop) per hectare (kg)
self.density_plant = 0.77 # the density of the crop tissue assumed as fresh leaf (kg/L)
        # fraction of water in fresh food items (Table A 1-7.4 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.frac_h2o_amphi = 0.85
self.frac_h2o_arthro = 0.69
self.frac_h2o_aq_plant = 0.8
self.frac_h2o_ben_invert = 0.78
self.frac_h2o_bird_mamm = 0.68
self.frac_h2o_broadleaves = 0.85
self.frac_h2o_fruit = 0.77
self.frac_h2o_fish = 0.75
self.frac_h2o_grass = 0.79
self.frac_h2o_filt_feeder = 0.82
self.frac_h2o_nectar = 0.70
self.frac_h2o_pollen = 0.063
self.frac_h2o_soil_inv = 0.84
self.frac_h2o_reptile = 0.66
self.frac_h2o_seeds = 0.093
self.frac_h2o_zooplanton = 0.83
        # parameters used to calculate food intake rate for vertebrates (Table A 1-7.5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.intake_param_a1_birds_gen = 0.648 # these ..._gen parameter values are used in calculation of concentration based EECs (see OPP TED worksheet 'Min/Max rate concentrations' column L)
self.intake_param_b1_birds_gen = 0.651
self.intake_param_a1_birds_pass = 0.398
self.intake_param_b1_birds_pass = 0.850
self.intake_param_a1_birds_nonpass = 0.301
self.intake_param_b1_birds_nonpass = 0.751
self.intake_param_a1_mamm_rodent = 0.621
self.intake_param_b1_mamm_rodent = 0.564
self.intake_param_a1_mamm_nonrodent = 0.235
self.intake_param_b1_mamm_nonrodent = 0.822
self.intake_param_a1_rep_amphi = 0.013
self.intake_param_b1_rep_amphi = 0.773
        # parameters used to calculate water intake rate for vertebrates (Table A 1-7.7 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.h2ointake_param_a2_birds_pass = 1.18
self.h2ointake_param_b2_birds_pass = 0.874
self.h2ointake_param_c2_birds_pass = 1.0
self.h2ointake_param_a2_birds_nonpass = 1.18
self.h2ointake_param_b2_birds_nonpass = 0.874
self.h2ointake_param_c2_birds_nonpass = 3.7
self.h2ointake_param_a2_mamm = 0.326
self.h2ointake_param_b2_mamm = 0.818
self.h2ointake_param_c2_mamm = 1.0
self.h2ointake_param_a2_rep_amphi = 0.065
self.h2ointake_param_b2_rep_amphi = 0.726
        self.h2ointake_param_c2_rep_amphi = 1.0  # this number is in question; in the OPP spreadsheet it is 3.7; in Table A 1-7.7 it is 1.0
# set constants for dermal dose calculations (from Table A 1-7.9 and Eq 16)
self.foliar_residue_factor = 0.62
self.foliar_contact_rate = 6.01
self.derm_contact_hours = 8.0
self.frac_animal_foliage_contact = 0.079
self.derm_contact_factor = 0.1
self.derm_absorp_factor = 1.0
self.frac_body_exposed = 0.5
# set species surface area parameters (from Eq 12)
self.surface_area_birds_a3 = 10.0
self.surface_area_birds_b3 = 0.667
self.surface_area_mamm_a3 = 12.3
self.surface_area_mamm_b3 = 0.65
self.surface_area_amphi_frogs_toads_a3 = 1.131
self.surface_area_amphi_frogs_toads_b3 = 0.579
self.surface_area_amphi_sal_a3 = 8.42
self.surface_area_amphi_sal_b3 = 0.694
self.surface_area_reptile_turtle_a3 = 16.61
self.surface_area_reptile_turtle_b3 = 0.61
self.surface_area_reptile_snake_a3 = 25.05
self.surface_area_reptile_snake_b3 = 0.63
# set parameters used to calculate inhalation rate for vertebrates (Table A 1-7.12)
self.inhal_rate_birds_a4 = 284.
self.inhal_rate_birds_b4 = 0.77
self.inhal_rate_mamm_a4 = 379.
self.inhal_rate_mamm_b4 = 0.80
self.inhal_rate_rep_amphi_a4 = 76.9
self.inhal_rate_rep_amphi_b4 = 0.76
self.app_frac_timestep_aerial = 0.025 # fraction of 1 hour time step (i.e., 90 seconds)
self.app_frac_timestep_gnd_blast = 0.0083 # fraction of 1 hour time step (i.e., 30 seconds)
self.spray_release_hgt_aerial = 3.3 # meters
self.spray_release_hgt_gnd_blast = 1.0 # meters
self.lab_to_field_factor = 3.0 # as included in Eq 19
self.bird_to_mamm_pulmonary_diff_rate = 3.4 # as included in Eq 22
self.inhal_dose_period = 24.0
# generic animal bodyweights (for use in daily allometric dietary consumption rate calculations)
self.mamm_sm_bodywgt = 15. # gms
self.mamm_lg_bodywgt = 1000. # gms
self.bird_sm_bodywgt = 20. # gms
self.rep_amphi_bodywgt = 2. # gms
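    def example_unitless_henry_law(self, henry_law_const_atm_m3_mol):
        """
        Illustrative sketch (assumption: this helper is not part of the original ubertool source):
        shows the unitless Henry's Law conversion applied above in set_global_constants for a single
        hypothetical value, e.g. 1.0e-5 atm-m3/mol at 25 degC gives roughly 4.1e-4.
        """
        # H_unitless = H / (R * T), with R in atm-m3/mol-K and T in Kelvin
        return henry_law_const_atm_m3_mol / (self.gas_const * self.stan_temp_kelvin)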
def spray_drift_params(self, sim_num):
"""
:description sets spray drift parameters for calculations of distance from source area associated with pesticide concentrations
:param sim_num number of simulation
:return:
"""
# set spray drift parameters for estimating distances from source related to downgradient pesticide concentrations (for min/max application scenarios)
self.drift_param_a_min, self.drift_param_b_min, self.drift_param_c_min = \
self.set_drift_parameters(self.app_method_min[sim_num], self.boom_hgt_min[sim_num],
self.droplet_spec_min[sim_num])
self.drift_param_a_max, self.drift_param_b_max, self.drift_param_c_max = \
self.set_drift_parameters(self.app_method_max[sim_num], self.boom_hgt_max[sim_num],
self.droplet_spec_max[sim_num])
# set maximum distances for spray drift calculations for min/max application scenarios
self.max_drift_distance_minapp = self.set_max_drift_distance(self.app_method_min[sim_num])
self.max_drift_distance_maxapp = self.set_max_drift_distance(self.app_method_max[sim_num])
def runoff_params(self, sim_num):
"""
:description calculate runoff parameters for min/max application scenarios
:param sim_num number of simulation
:return:
"""
        # runoff parameters (represents OPP TED Excel model worksheet 'plants' columns C & D rows 3 - 5)
self.pest_incorp_min, self.runoff_frac_min = self.calc_runoff_params(sim_num, self.app_method_min[sim_num], self.app_rate_min[sim_num], self.pest_incorp_depth_min[sim_num])
self.pest_incorp_max, self.runoff_frac_max = self.calc_runoff_params(sim_num, self.app_method_max[sim_num], self.app_rate_max[sim_num], self.pest_incorp_depth_max[sim_num])
def plants(self, sim_num):
"""
:description executes collection of functions/methods associated with the 'plants' worksheet in the OPP TED Excel model
:return:
"""
# calculate plant toxicity to application rate (min/max) ratios across all simulations
# (represents columns G & H rows 205 - 224 of OPP TED Excel spreadsheet 'inputs' worksheet)
self.calc_plant_tox_ratios()
# calculate plant runoff-based EECs for min/max application scenarios
# (represents calculations found in columns C & D rows 9 - 10 in OPP TED Excel spreadsheet 'Plants' worksheet)
self.runoff_eec_dry_area_min, self.runoff_eec_semiaq_area_min = self.calc_runoff_based_eec(self.app_rate_min[sim_num], self.pest_incorp_min, self.runoff_frac_min)
self.runoff_eec_dry_area_max, self.runoff_eec_semiaq_area_max = self.calc_runoff_based_eec(self.app_rate_max[sim_num], self.pest_incorp_max, self.runoff_frac_max)
# determine if plant EEC due to runoff exceeds various thresholds for pre-emergence of monocots and dicots
# (represents determinations found in columns C & D rows 14 - 28 in OPP TED Excel spreadsheet 'Plants' worksheet)
self.plant_risk_conclusions(sim_num)
# calculate plant risk threshold distances
# (represents columns C & D rows 32 to 51 in OPP TED Excel spreadsheet 'Plants' worksheet)
self.plant_risk_threshold_distances(sim_num)
def conc_based_eec_timeseries(self, sim_num):
"""
:description executes collection of functions/methods associated with the 'min/max rate concentrations' worksheet in the OPP TED Excel model
# calculate upper bound and mean concentration based EECs for food items (daily values for a year) - min application scenario
:param sim_num number of simulation
:return:
"""
# set/reset arrays for holding single simulation results
self.initialize_simlation_panda_series()
# generate daily flag to identify application day numbers within year for min/max application scenarios
self.app_flags_min_scenario = self.daily_app_flag(self.num_apps_min[sim_num], self.app_interval_min[sim_num])
self.app_flags_max_scenario = self.daily_app_flag(self.num_apps_max[sim_num], self.app_interval_max[sim_num])
# calculate upper bound and mean concentration based EECs for food items (daily values for a year) - min application scenario
self.out_diet_eec_upper_min_sg = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_upper_sg, self.app_flags_min_scenario) # short grass
self.out_diet_eec_upper_min_tg = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_upper_tg, self.app_flags_min_scenario) # tall grass
self.out_diet_eec_upper_min_blp = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_upper_blp, self.app_flags_min_scenario) # broad-leafed plants
self.out_diet_eec_upper_min_fp = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_upper_fp, self.app_flags_min_scenario) # seeds/fruits/pods
self.out_diet_eec_upper_min_arthro = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_upper_arthro, self.app_flags_min_scenario) # arthropods
self.out_diet_eec_mean_min_sg = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_mean_sg, self.app_flags_min_scenario) # short grass
self.out_diet_eec_mean_min_tg = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_mean_tg, self.app_flags_min_scenario) # tall grass
self.out_diet_eec_mean_min_blp = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_mean_blp, self.app_flags_min_scenario) # broad-leafed plants
self.out_diet_eec_mean_min_fp = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_mean_fp, self.app_flags_min_scenario) # seeds/fruits/pods
self.out_diet_eec_mean_min_arthro = self.daily_plant_timeseries(sim_num, self.app_rate_min[sim_num], self.food_multiplier_mean_arthro, self.app_flags_min_scenario) # arthropods
# calculate upper bound and mean concentration based EECs for food items (daily values for a year) - max application scenario
self.out_diet_eec_upper_max_sg = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_upper_sg, self.app_flags_max_scenario) # short grass
self.out_diet_eec_upper_max_tg = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_upper_tg, self.app_flags_max_scenario) # tall grass
self.out_diet_eec_upper_max_blp = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_upper_blp, self.app_flags_max_scenario) # broad-leafed plants
self.out_diet_eec_upper_max_fp = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_upper_fp, self.app_flags_max_scenario) # seeds/fruits/pods
self.out_diet_eec_upper_max_arthro = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_upper_arthro, self.app_flags_max_scenario) # arthropods
self.out_diet_eec_mean_max_sg = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_mean_sg, self.app_flags_max_scenario) # short grass
self.out_diet_eec_mean_max_tg = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_mean_tg, self.app_flags_max_scenario) # tall grass
self.out_diet_eec_mean_max_blp = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_mean_blp, self.app_flags_max_scenario) # broad-leafed plants
self.out_diet_eec_mean_max_fp = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_mean_fp, self.app_flags_max_scenario) # seeds/fruits/pods
self.out_diet_eec_mean_max_arthro = self.daily_plant_timeseries(sim_num, self.app_rate_max[sim_num], self.food_multiplier_mean_arthro, self.app_flags_max_scenario) # arthropods
# calculate daily soil pore water, soil, puddles, and dew concentrations (min/max application scenarios)
self.out_conc_pore_h2o_min = self.daily_soil_h2o_timeseries(sim_num, self.app_rate_min[sim_num], self.app_flags_min_scenario, "pore_water")
self.out_conc_pore_h2o_max = self.daily_soil_h2o_timeseries(sim_num, self.app_rate_max[sim_num], self.app_flags_max_scenario, "pore_water")
self.out_conc_puddles_min = self.daily_soil_h2o_timeseries(sim_num, self.app_rate_min[sim_num], self.app_flags_min_scenario, "puddles")
self.out_conc_puddles_max = self.daily_soil_h2o_timeseries(sim_num, self.app_rate_max[sim_num], self.app_flags_max_scenario, "puddles")
self.out_conc_dew_min = self.daily_plant_dew_timeseries(sim_num, self.out_diet_eec_upper_min_blp)
self.out_conc_dew_max = self.daily_plant_dew_timeseries(sim_num, self.out_diet_eec_upper_max_blp)
self.out_soil_conc_min = self.daily_soil_timeseries(sim_num, self.out_conc_pore_h2o_min)
self.out_soil_conc_max = self.daily_soil_timeseries(sim_num, self.out_conc_pore_h2o_max)
# calculate daily air (under canopy) concentrations (min/max application scenarios)
self.out_air_conc_min = self.daily_canopy_air_timeseries(sim_num, self.app_rate_min[sim_num], self.app_flags_min_scenario)
self.out_air_conc_max = self.daily_canopy_air_timeseries(sim_num, self.app_rate_max[sim_num], self.app_flags_max_scenario)
# calculate daily concentrations for soil-dwelling invertebrates, small mammals, large mammals, and small birds
# (min/max application scenarios & upper/mean food multipliers)
self.out_diet_eec_min_soil_inv = self.daily_soil_inv_timeseries(sim_num, self.out_conc_pore_h2o_min)
self.out_diet_eec_max_soil_inv = self.daily_soil_inv_timeseries(sim_num, self.out_conc_pore_h2o_max)
        # calculate daily whole body concentrations for prey items (small mammals, large mammals, small birds, small terrestrial phase amphibians/reptiles)
# (min/max application scenarios & upper/mean food multipliers)
self.out_diet_eec_upper_min_sm_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_sm_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_upper_min_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_upper_min_lg_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_lg_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_upper_min_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_upper_min_sm_bird = self.daily_animal_dose_timeseries(self.intake_param_a1_birds_gen, self.intake_param_b1_birds_gen, self.bird_sm_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_upper_min_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_upper_min_sm_amphi = self.daily_animal_dose_timeseries(self.intake_param_a1_rep_amphi, self.intake_param_b1_rep_amphi, self.rep_amphi_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_upper_min_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_mean_min_sm_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_sm_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_mean_min_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_mean_min_lg_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_lg_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_mean_min_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_mean_min_sm_bird = self.daily_animal_dose_timeseries(self.intake_param_a1_birds_gen, self.intake_param_b1_birds_gen, self.bird_sm_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_mean_min_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_mean_min_sm_amphi = self.daily_animal_dose_timeseries(self.intake_param_a1_rep_amphi, self.intake_param_b1_rep_amphi, self.rep_amphi_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_mean_min_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_upper_max_sm_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_sm_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_upper_max_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_upper_max_lg_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_lg_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_upper_max_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_upper_max_sm_bird = self.daily_animal_dose_timeseries(self.intake_param_a1_birds_gen, self.intake_param_b1_birds_gen, self.bird_sm_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_upper_max_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_upper_max_sm_amphi = self.daily_animal_dose_timeseries(self.intake_param_a1_rep_amphi, self.intake_param_b1_rep_amphi, self.rep_amphi_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_upper_max_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_mean_max_sm_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_sm_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_mean_max_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_mean_max_lg_mamm = self.daily_animal_dose_timeseries(self.intake_param_a1_mamm_rodent, self.intake_param_b1_mamm_rodent, self.mamm_lg_bodywgt, self.frac_h2o_aq_plant, self.out_diet_eec_mean_max_sg,
self.frac_retained_mamm[sim_num])
self.out_diet_eec_mean_max_sm_bird = self.daily_animal_dose_timeseries(self.intake_param_a1_birds_gen, self.intake_param_b1_birds_gen, self.bird_sm_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_mean_max_arthro,
self.frac_retained_birds[sim_num])
self.out_diet_eec_mean_max_sm_amphi = self.daily_animal_dose_timeseries(self.intake_param_a1_rep_amphi, self.intake_param_b1_rep_amphi, self.rep_amphi_bodywgt, self.frac_h2o_arthro, self.out_diet_eec_mean_max_arthro,
self.frac_retained_birds[sim_num])
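    # Illustrative sketch (not part of the original OPP TED model code): the helper
    # 'daily_animal_dose_timeseries' used above is defined elsewhere in this module. Assuming it
    # follows the usual allometric food-intake approach, the core of a minimal hypothetical
    # version might read:
    #
    #     intake_rate = intake_param_a1 * body_wgt ** intake_param_b1   # assumed allometric intake (g food/day)
    #     dose_ts = diet_conc_ts * intake_rate / body_wgt               # assumed dose in mg a.i./kg-bw/day
    #
    # The water-content ('frac_h2o') and retention ('frac_retained') adjustments, and the exact
    # equations, are those of the OPP TED model and are implemented in the real method.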
def eec_exceedances(self, sim_num):
"""
:description calculates the number of times a food item concentration (a time series of days for a year)
exceeds the various toxicity thresholds
        :param sim_num model simulation number
:NOTE this represents OPP TED Excel model worksheet 'Min/Max rate - dietary conc results' columns D - N lines 3 - 54 and 58 - 109
(the objective is to replicate the rows and columns of the worksheets)
:return:
"""
        na_series = pd.Series(366*['NA']) # dummy daily time series containing 'NA' to be used when a food item is not relevant to a species-specific toxicity measure
        # collect concentration-based toxicity data into a single series for each taxon
self.tox_cbt_mamm = pd.Series([self.cbt_mamm_1inmill_mort[sim_num], self.cbt_mamm_1inten_mort[sim_num], self.cbt_mamm_low_lc50[sim_num], self.cbt_mamm_sub_direct[sim_num],
self.cbt_mamm_grow_noec[sim_num], self.cbt_mamm_grow_loec[sim_num], self.cbt_mamm_repro_noec[sim_num], self.cbt_mamm_repro_loec[sim_num], self.cbt_mamm_behav_noec[sim_num],
self.cbt_mamm_behav_loec[sim_num], self.cbt_mamm_sensory_noec[sim_num], self.cbt_mamm_sensory_loec[sim_num], self.cbt_mamm_sub_indirect[sim_num]])
self.tox_cbt_bird = pd.Series([self.cbt_bird_1inmill_mort[sim_num],self.cbt_bird_1inten_mort[sim_num],self.cbt_bird_low_lc50[sim_num],self.cbt_bird_sub_direct[sim_num],
self.cbt_bird_grow_noec[sim_num],self.cbt_bird_grow_loec[sim_num],self.cbt_bird_repro_noec[sim_num],self.cbt_bird_repro_loec[sim_num],self.cbt_bird_behav_noec[sim_num],
self.cbt_bird_behav_loec[sim_num],self.cbt_bird_sensory_noec[sim_num],self.cbt_bird_sensory_loec[sim_num],self.cbt_bird_sub_indirect[sim_num]])
self.tox_cbt_reptile = pd.Series([self.cbt_reptile_1inmill_mort[sim_num],self.cbt_reptile_1inten_mort[sim_num],self.cbt_reptile_low_lc50[sim_num],self.cbt_reptile_sub_direct[sim_num],
self.cbt_reptile_grow_noec[sim_num],self.cbt_reptile_grow_loec[sim_num],self.cbt_reptile_repro_noec[sim_num],self.cbt_reptile_repro_loec[sim_num],self.cbt_reptile_behav_noec[sim_num],
self.cbt_reptile_behav_loec[sim_num],self.cbt_reptile_sensory_noec[sim_num],self.cbt_reptile_sensory_loec[sim_num],self.cbt_reptile_sub_indirect[sim_num]])
self.tox_cbt_inv = pd.Series([self.cbt_inv_food_1inmill_mort[sim_num],self.cbt_inv_food_1inten_mort[sim_num],self.cbt_inv_food_low_lc50[sim_num],self.cbt_inv_food_sub_direct[sim_num],
self.cbt_inv_food_grow_noec[sim_num],self.cbt_inv_food_grow_loec[sim_num],self.cbt_inv_food_repro_noec[sim_num],self.cbt_inv_food_repro_loec[sim_num],self.cbt_inv_food_behav_noec[sim_num],
self.cbt_inv_food_behav_loec[sim_num],self.cbt_inv_food_sensory_noec[sim_num],self.cbt_inv_food_sensory_loec[sim_num],self.cbt_inv_food_sub_indirect[sim_num]])
# collect/aggregate timeseries of food item concentrations into a single series (upper bound/mean and min/max application scenario)
        # notice that the time series have an _1 and _2 associated with them; the difference is the list of food items
        # that are relevant to the taxa; the _1 series aggregates food item time series relevant to mammals/birds/reptiles while the
        # _2 series aggregates time series relevant to terrestrial invertebrates --
# for all non-relevant food items per taxa a dummy time series filled with 'NA' is used in the aggregation; this allows the
# OPP TED spreadsheet to be replicated
# process minimum application scenario time series with upper bound & mean residue concentration multipliers for food items
self.eec_ts_upper_min_1 = pd.Series([[self.out_diet_eec_upper_min_sg], [self.out_diet_eec_upper_min_tg], [self.out_diet_eec_upper_min_blp], \
[self.out_diet_eec_upper_min_fp], [self.out_diet_eec_upper_min_arthro], [self.out_diet_eec_min_soil_inv],
[self.out_diet_eec_upper_min_sm_mamm], [self.out_diet_eec_upper_min_lg_mamm], [self.out_diet_eec_upper_min_sm_bird],
[self.out_diet_eec_upper_min_sm_amphi], [na_series]])
self.eec_ts_upper_min_2 = pd.Series([[self.out_diet_eec_upper_min_sg], [self.out_diet_eec_upper_min_tg], [self.out_diet_eec_upper_min_blp], \
[self.out_diet_eec_upper_min_fp], [self.out_diet_eec_upper_min_arthro], [self.out_diet_eec_min_soil_inv],
[na_series], [na_series], [na_series], [na_series], [self.out_soil_conc_min]])
self.eec_exc_upper_min_mamm = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_mamm)
self.eec_exc_upper_min_bird = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_bird)
self.eec_exc_upper_min_reptile = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_reptile)
self.eec_exc_upper_min_inv = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_min_2, self.tox_cbt_inv)
self.eec_ts_mean_min_1 = pd.Series([[self.out_diet_eec_mean_min_sg], [self.out_diet_eec_mean_min_tg], [self.out_diet_eec_mean_min_blp], \
[self.out_diet_eec_mean_min_fp], [self.out_diet_eec_mean_min_arthro], [na_series],
[self.out_diet_eec_mean_min_sm_mamm], [self.out_diet_eec_mean_min_lg_mamm], [self.out_diet_eec_mean_min_sm_bird],
[self.out_diet_eec_mean_min_sm_amphi], [na_series]])
self.eec_ts_mean_min_2 = pd.Series([[self.out_diet_eec_mean_min_sg], [self.out_diet_eec_mean_min_tg], [self.out_diet_eec_mean_min_blp], \
[self.out_diet_eec_mean_min_fp], [self.out_diet_eec_mean_min_arthro], [na_series],
[na_series], [na_series], [na_series], [na_series], [na_series]]) # soil concentration timeseries same as upper bound case
self.eec_exc_mean_min_mamm = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_min_1, self.tox_cbt_mamm)
self.eec_exc_mean_min_bird = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_min_1, self.tox_cbt_bird)
self.eec_exc_mean_min_reptile = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_min_1, self.tox_cbt_reptile)
self.eec_exc_mean_min_inv = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_min_2, self.tox_cbt_inv)
# process maximum application scenario time series with upper bound & mean residue concentration multipliers for food items
self.eec_ts_upper_max_1 = pd.Series([[self.out_diet_eec_upper_max_sg], [self.out_diet_eec_upper_max_tg], [self.out_diet_eec_upper_max_blp], \
[self.out_diet_eec_upper_max_fp], [self.out_diet_eec_upper_max_arthro], [self.out_diet_eec_max_soil_inv],
[self.out_diet_eec_upper_max_sm_mamm], [self.out_diet_eec_upper_max_lg_mamm], [self.out_diet_eec_upper_max_sm_bird],
[self.out_diet_eec_upper_max_sm_amphi], [na_series]])
self.eec_ts_upper_max_2 = pd.Series([[self.out_diet_eec_upper_max_sg], [self.out_diet_eec_upper_max_tg], [self.out_diet_eec_upper_max_blp],
[self.out_diet_eec_upper_max_fp], [self.out_diet_eec_upper_max_arthro], [self.out_diet_eec_max_soil_inv],
[na_series], [na_series], [na_series], [na_series], [self.out_soil_conc_max]])
self.eec_exc_upper_max_mamm = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_mamm)
self.eec_exc_upper_max_bird = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_bird)
self.eec_exc_upper_max_reptile = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_reptile)
self.eec_exc_upper_max_inv = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_upper_max_2, self.tox_cbt_inv)
self.eec_ts_mean_max_1 = pd.Series([[self.out_diet_eec_mean_max_sg], [self.out_diet_eec_mean_max_tg], [self.out_diet_eec_mean_max_blp], \
[self.out_diet_eec_mean_max_fp], [self.out_diet_eec_mean_max_arthro], [na_series],
[self.out_diet_eec_mean_max_sm_mamm], [self.out_diet_eec_mean_max_lg_mamm], [self.out_diet_eec_mean_max_sm_bird],
[self.out_diet_eec_mean_max_sm_amphi], [na_series]])
self.eec_ts_mean_max_2 = pd.Series([[self.out_diet_eec_mean_max_sg], [self.out_diet_eec_mean_max_tg], [self.out_diet_eec_mean_max_blp], \
[self.out_diet_eec_mean_max_fp], [self.out_diet_eec_mean_max_arthro], [na_series],
[na_series], [na_series], [na_series], [na_series], [na_series]]) # soil concentration timeseries same as upper bound case
self.eec_exc_mean_max_mamm = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_max_1, self.tox_cbt_mamm)
self.eec_exc_mean_max_bird = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_max_1, self.tox_cbt_bird)
self.eec_exc_mean_max_reptile = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_max_1, self.tox_cbt_reptile)
self.eec_exc_mean_max_inv = self.sum_exceedances(self.num_ts, self.num_tox, self.eec_ts_mean_max_2, self.tox_cbt_inv)
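    # Illustrative sketch (not part of the original OPP TED model code): 'sum_exceedances' is
    # defined elsewhere in this module. Judging from the call sites above, a minimal hypothetical
    # version that counts, for every (food-item time series, toxicity threshold) pair, the number
    # of days in the year on which the concentration exceeds the threshold could look like:
    #
    #     def sum_exceedances(self, num_ts, num_tox, eec_ts, tox_series):
    #         counts = pd.Series((num_ts * num_tox) * ['NA'])
    #         k = 0
    #         for i in range(num_ts):
    #             ts = pd.Series(eec_ts[i][0])                 # unwrap the single-element list
    #             for j in range(num_tox):
    #                 if str(ts.iloc[0]) == 'NA' or str(tox_series[j]) == 'NA':
    #                     counts[k] = 'NA'                     # food item not relevant to this measure
    #                 else:
    #                     counts[k] = (ts > tox_series[j]).sum()
    #                 k += 1
    #         return counts
    #
    # The real implementation (and its output ordering) is the one defined in this module.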
def eec_drift_distances(self, sim_num):
"""
:description calculates the distance from the pesticide spray source area to where the maximum daily food item concentration
occurs (only computed for upper bound residue concentrations and min/max application scenarios)
        :param sim_num model simulation number
:NOTE this represents OPP TED Excel model worksheet 'Min/Max rate - dietary conc results' columns D - N lines 113 - 164
(the objective is to replicate the rows and columns of the worksheets)
:return:
"""
        # we will reference 'self.tox_cbt_mamm', 'self.tox_cbt_bird', 'self.tox_cbt_reptile', & 'self.tox_cbt_inv', which
        # represent aggregations of the toxicity measures per taxa into pandas Series for processing (the series were constructed
        # in method 'eec_exceedances')
        na_series = pd.Series(366*['NA']) # dummy daily time series containing 'NA' to be used when a food item is not relevant to a species-specific toxicity measure
# collect/aggregate maximum concentrations from timeseries of food item concentrations into a single series (upper bound/mean and min/max application scenario)
        # notice that the time series have an _1 and _2 associated with them; the difference is the list of food items
        # that are relevant to the taxa; the _1 series aggregates food item time series relevant to mammals/birds/reptiles while the
        # _2 series aggregates time series relevant to terrestrial invertebrates --
# for all non-relevant food items per taxa a dummy time series filled with 'NA' is used in the aggregation; this allows the
# OPP TED spreadsheet to be replicated
# process minimum application scenario time series with upper bound residue concentration multipliers for food items
self.eec_ts_upper_min_1 = pd.Series([self.out_diet_eec_upper_min_sg.max(), self.out_diet_eec_upper_min_tg.max(), self.out_diet_eec_upper_min_blp.max(), \
self.out_diet_eec_upper_min_fp.max(), self.out_diet_eec_upper_min_arthro.max(), self.out_diet_eec_min_soil_inv.max(),
self.out_diet_eec_upper_min_sm_mamm.max(), self.out_diet_eec_upper_min_lg_mamm.max(), self.out_diet_eec_upper_min_sm_bird.max(),
self.out_diet_eec_upper_min_sm_amphi.max(), na_series.max()])
self.eec_ts_upper_min_2 = pd.Series([self.out_diet_eec_upper_min_sg.max(), self.out_diet_eec_upper_min_tg.max(), self.out_diet_eec_upper_min_blp.max(), \
self.out_diet_eec_upper_min_fp.max(), self.out_diet_eec_upper_min_arthro.max(), self.out_diet_eec_min_soil_inv.max(),
na_series.max(), na_series.max(), na_series.max(), na_series.max(), self.out_soil_conc_min.max()])
self.eec_tox_frac_mamm_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_mamm)
self.eec_tox_frac_bird_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_bird)
self.eec_tox_frac_reptile_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_reptile)
self.eec_tox_frac_inv_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_min_1, self.tox_cbt_inv)
# calculate distances from source area related to max daily concentration (minimum application scenario)
self.eec_dist_upper_min_mamm = self.calc_maxeec_distance(self.eec_tox_frac_mamm_1, self.drift_param_a_min, self.drift_param_b_min, self.drift_param_c_min, self.max_drift_distance_minapp)
self.eec_dist_upper_min_bird = self.calc_maxeec_distance(self.eec_tox_frac_bird_1, self.drift_param_a_min, self.drift_param_b_min, self.drift_param_c_min, self.max_drift_distance_minapp)
self.eec_dist_upper_min_reptile = self.calc_maxeec_distance(self.eec_tox_frac_reptile_1, self.drift_param_a_min, self.drift_param_b_min, self.drift_param_c_min, self.max_drift_distance_minapp)
self.eec_dist_upper_min_inv = self.calc_maxeec_distance(self.eec_tox_frac_inv_1, self.drift_param_a_min, self.drift_param_b_min, self.drift_param_c_min, self.max_drift_distance_minapp)
# process maximum application scenario time series with upper bound residue concentration multipliers for food items
self.eec_ts_upper_max_1 = pd.Series([self.out_diet_eec_upper_max_sg.max(), self.out_diet_eec_upper_max_tg.max(), self.out_diet_eec_upper_max_blp.max(), \
self.out_diet_eec_upper_max_fp.max(), self.out_diet_eec_upper_max_arthro.max(), self.out_diet_eec_max_soil_inv.max(),
self.out_diet_eec_upper_max_sm_mamm.max(), self.out_diet_eec_upper_max_lg_mamm.max(), self.out_diet_eec_upper_max_sm_bird.max(),
self.out_diet_eec_upper_max_sm_amphi.max(), na_series.max()])
self.eec_ts_upper_max_2 = pd.Series([self.out_diet_eec_upper_max_sg.max(), self.out_diet_eec_upper_max_tg.max(), self.out_diet_eec_upper_max_blp.max(), \
self.out_diet_eec_upper_max_fp.max(), self.out_diet_eec_upper_max_arthro.max(), self.out_diet_eec_max_soil_inv.max(),
na_series.max(), na_series.max(), na_series.max(), na_series.max(), self.out_soil_conc_max.max()])
self.eec_tox_frac_mamm_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_mamm)
self.eec_tox_frac_bird_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_bird)
self.eec_tox_frac_reptile_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_reptile)
self.eec_tox_frac_inv_1 = self.calc_eec_tox_frac(self.num_ts, self.num_tox, self.eec_ts_upper_max_1, self.tox_cbt_inv)
# calculate distances from source area related to max daily concentration (maximum application scenario)
self.eec_dist_upper_max_mamm = self.calc_maxeec_distance(self.eec_tox_frac_mamm_1, self.drift_param_a_max, self.drift_param_b_max, self.drift_param_c_max, self.max_drift_distance_maxapp)
self.eec_dist_upper_max_bird = self.calc_maxeec_distance(self.eec_tox_frac_bird_1, self.drift_param_a_max, self.drift_param_b_max, self.drift_param_c_max, self.max_drift_distance_maxapp)
self.eec_dist_upper_max_reptile = self.calc_maxeec_distance(self.eec_tox_frac_reptile_1, self.drift_param_a_max, self.drift_param_b_max, self.drift_param_c_max, self.max_drift_distance_maxapp)
self.eec_dist_upper_max_inv = self.calc_maxeec_distance(self.eec_tox_frac_inv_1, self.drift_param_a_max, self.drift_param_b_max, self.drift_param_c_max, self.max_drift_distance_maxapp)
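    # Illustrative sketch (not part of the original OPP TED model code): 'calc_eec_tox_frac' and
    # 'calc_maxeec_distance' are defined elsewhere in this module. Conceptually, the first forms the
    # ratio of each toxicity threshold to the corresponding maximum daily EEC, and the second turns
    # that ratio into a distance from the treated field using the spray-drift curve parameters
    # (a, b, c), capped at the maximum drift distance. A hypothetical reading of the first helper:
    #
    #     def calc_eec_tox_frac(self, num_ts, num_tox, max_eecs, tox_series):
    #         fracs = pd.Series((num_ts * num_tox) * ['NA'])
    #         k = 0
    #         for i in range(num_ts):
    #             for j in range(num_tox):
    #                 if str(max_eecs[i]) != 'NA' and str(tox_series[j]) != 'NA' and max_eecs[i] > 0.:
    #                     fracs[k] = tox_series[j] / max_eecs[i]   # fraction of the max EEC at which the threshold sits
    #                 k += 1
    #         return fracs
    #
    # The drift equation used by 'calc_maxeec_distance' is the OPP TED model's and is not reproduced here.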
def species_doses(self, sim_num):
"""
:description executes collection of functions/methods associated with the 'Min/Max rate doses' worksheet in the OPP TED Excel model
# calculate species/food item specific doses (from exposure pathways) and health measure ratios
:param sim_num model simulation number
:return:
"""
# calculate upper bound and mean concentrations in diet per species/diet item combination (for min/max application scenarios)
# (represents columns I & J of worksheet 'Min/Max rate doses' of OPP TED spreadsheet model)
self.calc_species_diet_concs_minapp(sim_num)
self.calc_species_diet_concs_maxapp(sim_num)
        # calculate upper bound and mean dietary doses per species/dietary item (for min/max application scenarios)
# (represents columns K & L of worksheet 'Min/Max rate doses' of OPP TED spreadsheet model)
self.calc_species_diet_dose_minapp(sim_num)
self.calc_species_diet_dose_maxapp(sim_num)
# calculate water doses from puddle and dew water consumption
        # (represents columns M & N of worksheet 'Min/Max rate doses' of OPP TED spreadsheet model)
self.calc_h2o_doses_minapp(sim_num)
self.calc_h2o_doses_maxapp(sim_num)
# calculate dermal to oral toxicity equivalency factors
# (this method implements Eqs 14 and 15 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.calc_derm_route_equiv_factor(sim_num)
# calculate dermal contact doses (upper bound and mean for minimum/maximum application scenarios)
# (method addresses columns O & P of worksheet 'Min/Max rate doses' of OPP TED spreadsheet model)
self.calc_species_derm_contact_dose_minapp(sim_num)
self.calc_species_derm_contact_dose_maxapp(sim_num)
# calculate dermal spray doses (for minimum/maximum application scenarios)
# (represents column Q of worksheet 'Min rate doses' of OPP TED spreadsheet model)
self.calc_species_derm_spray_dose_minmaxapp(sim_num)
# calculate air concentration immediately after application (for use in calculating inhalation vapor/spray doses)
# (represents Eq 18 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.calc_air_conc_drops_minmaxapp(sim_num)
# set value for volumetric fraction of droplet spectrum related to bird respiration limits (for use in calculating inhalation vapor/spray doses)
# (represents specification from OPP TED Excel 'inputs' worksheet columns H & I rows 14 - 16)
self.max_respire_frac_minapp = self.set_max_respire_frac(self.app_method_min[sim_num], self.droplet_spec_min[sim_num])
self.max_respire_frac_maxapp = self.set_max_respire_frac(self.app_method_max[sim_num], self.droplet_spec_max[sim_num])
# calculate inhalation to oral toxicity equivalency factors (for use in calculating inhalation vapor/spray doses)
# (Eqs 20 and 22 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment')
self.calc_inhal_route_equiv_factor(sim_num)
# calculate inhalation vapor/spray doses
# (represents columns R & S of worksheets 'Min/Max rate doses' of OPP TED spreadsheet model)
self.calc_species_inhal_dose_vapor()
self.calc_species_inhal_dose_spray(sim_num)
        # scan species-specific doses (from diet-based to inhalation) and determine the maximum;
        # the maxima are used in calculations included in worksheets 'Min/Max rate doses' columns T, U, Z, AA
self.determine_max_dose_minmaxapp()
        # calculate mortality threshold
        # (represents column V in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_species_mortality_thres(sim_num)
# calculate sublethal threshold
        # (represents column W in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_species_sublethal_thres(sim_num)
# calculate lowest LD50 threshold
        # (represents column X in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_species_lowld50_thres(sim_num)
# calculate HC50 threshold
        # (represents column Y in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_species_hc50_thres(sim_num)
# calculate distances from source area to where toxicity thresholds occur
        # (represents columns T & U in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_distance_to_risk_thres(sim_num)
# calculate ratio of maximum dose to toxicity thresholds: mortality and sublethal
        # (represents columns Z & AA in OPP TED spreadsheet model in worksheets 'Min/Max rate doses')
self.calc_maxdose_toxthres_ratios(sim_num)
return
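    # Illustrative usage sketch (not part of the original OPP TED model code): assuming a Monte
    # Carlo driver that loops over simulations, the per-simulation methods above would typically
    # be invoked together, e.g.
    #
    #     for sim_num in range(num_simulations):
    #         self.eec_exceedances(sim_num)
    #         self.eec_drift_distances(sim_num)
    #         self.species_doses(sim_num)
    #
    # ('num_simulations' is a hypothetical name used only for illustration.)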
| unlicense |
alexmojaki/blaze | blaze/compute/tests/test_pandas_compute.py | 1 | 26918 | from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df),
expected)
    # raises an AssertionError if no warning occurred; same for the check below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df),
expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
                          (3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
                          ['Bob', 1, 200]], columns=['name', 'count', 'sum'])
    tm.assert_frame_equal(result, expected)
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
def test_notnull():
assert (compute(nt.name.notnull(), ndf) == ndf.name.notnull()).all()
def test_isnan():
assert (compute(nt.amount.isnan(), ndf) == ndf.amount.isnull()).all()
@pytest.mark.parametrize('keys', [[1], [2, 3]])
def test_isin(keys):
expr = t[t.id.isin(keys)]
result = compute(expr, df)
expected = df.loc[df.id.isin(keys)]
tm.assert_frame_equal(result, expected)
def test_nunique_table():
expr = t.nunique()
result = compute(expr, df)
assert result == len(df.drop_duplicates())
def test_str_concat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s + 'a'
assert (compute(expr, a) == (a + 'a')).all()
def test_str_repeat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.repeat(3)
assert (compute(expr, a) == (a * 3)).all()
def test_str_interp():
a = Series(('%s', '%s', '%s'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.interp(1)
assert (compute(expr, a) == (a % 1)).all()
def test_timedelta_arith():
series = Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(series))
delta = timedelta(days=1)
assert (compute(sym + delta, series) == series + delta).all()
assert (compute(sym - delta, series) == series - delta).all()
def test_coerce_series():
s = pd.Series(list('123'), name='a')
t = symbol('t', discover(s))
result = compute(t.coerce(to='int64'), s)
expected = pd.Series([1, 2, 3], name=s.name)
assert_series_equal(result, expected)
def test_concat_arr():
s_data = Series(np.arange(15))
t_data = Series(np.arange(15, 30))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) == Series(np.arange(30))
).all()
def test_concat_mat():
s_data = DataFrame(np.arange(15).reshape(5, 3), columns=list('abc'))
t_data = DataFrame(np.arange(15, 30).reshape(5, 3), columns=list('abc'))
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
tm.assert_frame_equal(
compute(concat(s, t), {s: s_data, t: t_data}),
pd.DataFrame(np.arange(30).reshape(10, 3), columns=list('abc')),
)
def test_count_keepdims_frame():
df = pd.DataFrame(dict(a=[1, 2, 3, np.nan]))
s = symbol('s', discover(df))
assert_series_equal(compute(s.count(keepdims=True), df),
pd.Series([df.shape[0]], name='s_count'))
def test_time_field():
data = pd.Series(pd.date_range(start='20120101', end='20120102', freq='H'))
s = symbol('s', discover(data))
result = compute(s.time, data)
expected = data.dt.time
expected.name = 's_time'
assert_series_equal(result, expected)
| bsd-3-clause |
pv/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # Test that SpectralEmbedding fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # Test that SpectralEmbedding fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
abhisg/scikit-learn | sklearn/tree/tests/test_tree.py | 8 | 48235 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
reg = Tree(max_features=1, random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Subsample the larger datasets to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# Set n_samples equal to n_features to ease simultaneous construction
# of the csr and csc matrices
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))  # integer count for the array sizes below
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/io/json.py | 9 | 26035 | # pylint: disable-msg=E1101,W0613,W0603
import os
import copy
from collections import defaultdict
import numpy as np
import pandas.json as _json
from pandas.tslib import iNaT
from pandas.compat import long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer
from pandas.core.common import AbstractMethodError
import pandas.core.common as com
loads = _json.loads
dumps = _json.dumps
### interface to/from ###
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None):
if isinstance(obj, Series):
s = SeriesWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
elif isinstance(obj, DataFrame):
s = FrameWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
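# --- Editor's illustrative sketch (not part of the original module) ---
# to_json above dispatches on the object type (SeriesWriter vs FrameWriter);
# this sketch shows the Series path with its default 'index' orient. The
# helper name and sample Series are assumptions; the function is never called.
def _to_json_series_example():  # pragma: no cover
    s = Series([10, 20], index=['a', 'b'])
    # path_or_buf=None makes to_json return the JSON string,
    # expected: '{"a":10,"b":20}'
    return to_json(None, s)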
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient
* `Series`
- default is ``'index'``
- allowed values are: ``{'split','records','index'}``
- The Series index must be unique for orient ``'index'``.
* `DataFrame`
- default is ``'columns'``
- allowed values are: {'split','records','index','columns','values'}
- The DataFrame index must be unique for orients 'index' and
'columns'.
- The DataFrame columns must be unique for orients 'index',
'columns', and 'records'.
* The format of the JSON string
- split : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- records : list like
``[{column -> value}, ... , {column -> value}]``
- index : dict like ``{index -> {column -> value}}``
- columns : dict like ``{column -> {index -> value}}``
- values : just the values array
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
Returns
-------
result : Series or DataFrame
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long will raise here
# 5874
except (TypeError,ValueError):
exists = False
if exists:
with open(filepath_or_buffer, 'r') as fh:
json = fh.read()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
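# --- Editor's illustrative sketch (not part of the original module) ---
# Shows how the ``orient`` layouts documented above round-trip through
# DataFrame.to_json / read_json. The tiny frame and the helper name are
# assumptions made only for illustration; the function is never called here.
def _read_json_orient_example():  # pragma: no cover
    df = DataFrame([[1, 2], [3, 4]], index=['r1', 'r2'], columns=['a', 'b'])
    as_split = df.to_json(orient='split')      # {"columns":[...],"index":[...],"data":[...]}
    as_records = df.to_json(orient='records')  # [{"a":1,"b":2},{"a":3,"b":4}]
    restored = read_json(as_split, orient='split')
    return restored.equals(df), as_records     # expected: (True, the records string)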
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
com.pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a forced conversion is requested
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
# don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce floats to 64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
integer/float in epoch formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except OverflowError:
continue
except:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col))
or col in convert_dates))
#----------------------------------------------------------------------
# JSON normalization routines
def nested_to_record(ds, prefix="", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
level: the number of levels in the json string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if level == 0:
newkey = str(k)
else:
newkey = prefix + '.' + str(k)
# only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, level+1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
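# --- Editor's illustrative sketch (not part of the original module) ---
# Demonstrates the flattening behaviour described in the docstring above:
# nested keys become dotted keys, which can then feed straight into a
# DataFrame. The sample dict and helper name are assumptions for illustration.
def _nested_to_record_example():  # pragma: no cover
    raw = {'id': 1, 'meta': {'lang': 'en', 'score': 0.9}}
    flat = nested_to_record(raw)
    # flat == {'id': 1, 'meta.lang': 'en', 'meta.score': 0.9}
    return DataFrame([flat])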
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If given, prefix record column names with this string, e.g. a prefix of
'foo.bar.' yields foo.bar.field when the path to records is ['foo', 'bar']
meta_prefix : string, default None
Returns
-------
frame : DataFrame
Examples
--------
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
meta_keys = ['.'.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level+1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
meta_val = _pull_field(obj, val[level:])
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name %s, '
'need distinguishing prefix ' % k)
result[k] = np.array(v).repeat(lengths)
return result
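# A small usage sketch for the prefix arguments, reusing the docstring data
# above (the resulting column names are illustrative):
#
#   json_normalize(data, 'counties', ['state', ['info', 'governor']],
#                  record_prefix='county.', meta_prefix='meta.')
#
# would yield columns such as 'county.name', 'county.population',
# 'meta.state' and 'meta.info.governor', since record_prefix is prepended to
# the record columns and meta_prefix to the metadata columns.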
| apache-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/series/test_internals.py | 7 | 12848 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
from numpy import nan
import numpy as np
from pandas import Series
from pandas.tseries.index import Timestamp
import pandas.lib as lib
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
class TestSeriesInternals(tm.TestCase):
_multiprocess_can_split_ = True
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g.floats and object)
s = Series([1, 'na', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'],
dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=True,
convert_numeric=False)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
Timestamp('20010105')], dtype='M8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
        # preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
        # preserve if non-object
s = Series([1], dtype='float32')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r.convert_objects(convert_dates=True,convert_numeric=False)
# self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
# Test coercion returns correct type
s = Series(['a', 'b', 'c'])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 3)
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([np.nan] * 3)
assert_series_equal(results, expected)
expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
results = s._convert(timedelta=True, coerce=True)
assert_series_equal(results, expected)
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
s = Series(['a', '3.1415', dt, td])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
dtype=np.dtype('m8[ns]'))
assert_series_equal(results, expected)
# Test standard conversion returns original
results = s._convert(datetime=True)
assert_series_equal(results, s)
results = s._convert(numeric=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True)
assert_series_equal(results, s)
# test pass-through and non-conversion when other types selected
s = Series(['1.0', '2.0', '3.0'])
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([1.0, 2.0, 3.0])
assert_series_equal(results, expected)
results = s._convert(True, False, True)
assert_series_equal(results, s)
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0,
0)])
assert_series_equal(results, expected)
results = s._convert(datetime=False, numeric=True, timedelta=True)
assert_series_equal(results, s)
td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
s = Series([td, td], dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([td, td])
assert_series_equal(results, expected)
results = s._convert(True, True, False)
assert_series_equal(results, s)
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s._convert(numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
result = r._convert(numeric=True)
expected = s.copy()
expected['a'] = nan
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g.floats and object)
s = Series([1, 'na', 3, 4])
result = s._convert(datetime=True, numeric=True)
expected = Series([1, nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s._convert(datetime=True, numeric=True)
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'], dtype='O')
result = s._convert(datetime=True)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'), lib.NaT, lib.NaT, lib.NaT,
Timestamp('20010104'), Timestamp('20010105')],
dtype='M8[ns]')
result = s2._convert(datetime=True, numeric=False, timedelta=False,
coerce=True)
assert_series_equal(result, expected)
result = s2._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
        # preserve if non-object
s = Series([1], dtype='float32')
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r._convert(convert_dates=True,convert_numeric=False)
# self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
expected = Series([lib.NaT])
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series([x.upper()])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
s = Series(['1.0', '2'])
self.assertRaises(ValueError, s._convert)
def test_convert_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
| apache-2.0 |
matthiasn/iWasWhere | src/tensorflow/meo_data.py | 1 | 1944 | import pandas as pd
import tensorflow as tf
TRAIN_PATH = "./data/export/entries_stories_training.csv"
TEST_PATH = "./data/export/entries_stories_test.csv"
UNLABELED_PATH = "./data/export/entries_stories_unlabeled.csv"
CSV_COLUMN_NAMES = ['Timestamp', 'Geohash40', 'Geohash35', 'Geohash30',
'Geohash25', 'Geohash20', 'Geohash15', 'Visit', 'Starred',
'ImgFile', 'AudioFile', 'Task', 'Screenshot', 'Md', 'WeeksAgo',
'DaysAgo', 'QuarterDay', 'HalfQuarterDay', 'Hour', 'Tags1',
'Mentions1']
CSV_COLUMN_NAMES_2 = CSV_COLUMN_NAMES + ['PrimaryStory']
def one_hot(sa):
ia = [int(k) for k in sa]
return tf.one_hot(ia, 500, 1.0, 0.1)
def load_data(y_name='PrimaryStory'):
train = pd.read_csv(TRAIN_PATH, names=CSV_COLUMN_NAMES_2, header=0)
train['Tags1'] = train['Tags1'].str.split('|', expand=True)
train_x, train_y = train, train.pop(y_name)
test = pd.read_csv(TEST_PATH, names=CSV_COLUMN_NAMES_2, header=0)
test['Tags1'] = test['Tags1'].str.split('|', expand=True)
test_x, test_y = test, test.pop(y_name)
unlabeled = pd.read_csv(UNLABELED_PATH, names=CSV_COLUMN_NAMES, header=0)
unlabeled['Tags1'] = unlabeled['Tags1'].str.replace('cat-', '').str.split(';', expand=True)
return (train_x, train_y), (test_x, test_y), unlabeled
def train_input_fn(features, labels, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
return dataset
def eval_input_fn(features, labels, batch_size):
features = dict(features)
if labels is None:
inputs = features
else:
inputs = (features, labels)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
dataset = dataset.batch(batch_size)
return dataset
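# Rough usage sketch; the estimator wiring below is an assumption for
# illustration and not part of this module (handling of the categorical
# columns such as Tags1 is omitted):
#
#   (train_x, train_y), (test_x, test_y), unlabeled = load_data()
#   feature_cols = [tf.feature_column.numeric_column(k)
#                   for k in ['DaysAgo', 'Hour', 'Starred']]
#   clf = tf.estimator.DNNClassifier(feature_columns=feature_cols,
#                                    hidden_units=[32, 16], n_classes=500)
#   clf.train(input_fn=lambda: train_input_fn(train_x, train_y, 64),
#             steps=1000)
#   clf.evaluate(input_fn=lambda: eval_input_fn(test_x, test_y, 64))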
| agpl-3.0 |
binghongcha08/pyQMD | QMC/MC_exchange/permute4d/dissipation/3.0/traj.py | 17 | 1290 | import numpy as np
import pylab as plt
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
| gpl-3.0 |
jseabold/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | 1 | 29510 | # -*- coding: utf-8 -*-
"""
Test VAR Model
"""
from statsmodels.compat.pandas import assert_index_equal
from statsmodels.compat.python import lrange
from io import BytesIO, StringIO
import os
import sys
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
import pandas as pd
import pytest
import statsmodels.api as sm
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tsa.base.datetools import dates_from_str
import statsmodels.tsa.vector_ar.util as util
from statsmodels.tsa.vector_ar.var_model import VAR, var_acf
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
@pytest.fixture()
def bivariate_var_data(reset_randomstate):
"""A bivariate dataset for VAR estimation"""
e = np.random.standard_normal((252, 2))
y = np.zeros_like(e)
y[:2] = e[:2]
for i in range(2, 252):
y[i] = .2 * y[i - 1] + .1 * y[i - 2] + e[i]
return y
@pytest.fixture()
def bivariate_var_result(bivariate_var_data):
"""A bivariate VARResults for reuse"""
mod = VAR(bivariate_var_data)
return mod.fit()
class CheckVAR(object): # FIXME: not inherited, so these tests are never run!
# just so pylint will not complain
res1 = None
res2 = None
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
assert_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
assert_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
assert_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].mse_resid ** .5,
eval('self.res2.rmse_' + str(i + 1)), DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_' + str(i + 1)), DECIMAL_3)
def test_llf(self):
results = self.res1.results
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
assert_almost_equal(results[i].llf,
eval('self.res2.llf_' + str(i + 1)), DECIMAL_2)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
data = sm.datasets.macrodata.load_pandas().data[['realgdp', 'realcons', 'realinv']]
data = data.to_records(index=False)
nd = data.view((float, 3), type=np.ndarray)
nd = np.diff(np.log(nd), axis=0)
return nd.ravel().view(data.dtype, type=np.ndarray)
def generate_var(): # FIXME: make a test?
import pandas.rpy.common as prp
from rpy2.robjects import r
r.source('tests/var.R')
return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var(): # FIXME: make a test?
result = generate_var()
np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
"""
Simple interface with results generated by "vars" package in R.
"""
def __init__(self):
# data = np.load(resultspath + 'vars_results.npz')
from .results.results_var_data import var_results
data = var_results.__dict__
self.names = data['coefs'].dtype.names
self.params = data['coefs'].view((float, len(self.names)), type=np.ndarray)
self.stderr = data['stderr'].view((float, len(self.names)), type=np.ndarray)
self.irf = data['irf'].item()
self.orth_irf = data['orthirf'].item()
self.nirfs = int(data['nirfs'][0])
self.nobs = int(data['obs'][0])
self.totobs = int(data['totobs'][0])
crit = data['crit'].item()
self.aic = crit['aic'][0]
self.sic = self.bic = crit['sic'][0]
self.hqic = crit['hqic'][0]
self.fpe = crit['fpe'][0]
self.detomega = data['detomega'][0]
self.loglike = data['loglike'][0]
self.nahead = int(data['nahead'][0])
self.ma_rep = data['phis']
self.causality = data['causality']
_orig_stdout = None
def setup_module():
global _orig_stdout
_orig_stdout = sys.stdout
sys.stdout = StringIO()
class CheckIRF(object):
ref = None
res = None
irf = None
k = None
# ---------------------------------------------------------------------------
# IRF tests
def test_irf_coefs(self):
self._check_irfs(self.irf.irfs, self.ref.irf)
self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
def _check_irfs(self, py_irfs, r_irfs):
for i, name in enumerate(self.res.names):
ref_irfs = r_irfs[name].view((float, self.k), type=np.ndarray)
res_irfs = py_irfs[:, :, i]
assert_almost_equal(ref_irfs, res_irfs)
@pytest.mark.matplotlib
def test_plot_irf(self, close_figures):
self.irf.plot()
self.irf.plot(plot_stderr=False)
self.irf.plot(impulse=0, response=1)
self.irf.plot(impulse=0)
self.irf.plot(response=0)
self.irf.plot(orth=True)
self.irf.plot(impulse=0, response=1, orth=True)
@pytest.mark.matplotlib
def test_plot_cum_effects(self, close_figures):
self.irf.plot_cum_effects()
self.irf.plot_cum_effects(plot_stderr=False)
self.irf.plot_cum_effects(impulse=0, response=1)
self.irf.plot_cum_effects(orth=True)
self.irf.plot_cum_effects(impulse=0, response=1, orth=True)
@pytest.mark.matplotlib
def test_plot_figsizes(self):
assert_equal(self.irf.plot().get_size_inches(), (10, 10))
assert_equal(
self.irf.plot(figsize=(14, 10)).get_size_inches(),
(14, 10))
assert_equal(self.irf.plot_cum_effects().get_size_inches(), (10, 10))
assert_equal(
self.irf.plot_cum_effects(figsize=(14, 10)).get_size_inches(),
(14, 10))
@pytest.mark.smoke
class CheckFEVD(object):
fevd = None
# ---------------------------------------------------------------------------
# FEVD tests
@pytest.mark.matplotlib
def test_fevd_plot(self, close_figures):
self.fevd.plot()
def test_fevd_repr(self):
self.fevd
def test_fevd_summary(self):
self.fevd.summary()
@pytest.mark.xfail(reason="FEVD.cov() is not implemented",
raises=NotImplementedError, strict=True)
def test_fevd_cov(self):
# test does not crash
# not implemented
covs = self.fevd.cov()
raise NotImplementedError
class TestVARResults(CheckIRF, CheckFEVD):
@classmethod
def setup_class(cls):
cls.p = 2
cls.data = get_macrodata()
cls.model = VAR(cls.data)
cls.names = cls.model.endog_names
cls.ref = RResults()
cls.k = len(cls.ref.names)
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(cls.ref.nirfs)
cls.nahead = cls.ref.nahead
cls.fevd = cls.res.fevd()
def test_constructor(self):
# make sure this works with no names
ndarr = self.data.view((float, 3), type=np.ndarray)
model = VAR(ndarr)
res = model.fit(self.p)
def test_names(self):
assert_equal(self.model.endog_names, self.ref.names)
model2 = VAR(self.data)
assert_equal(model2.endog_names, self.ref.names)
def test_get_eq_index(self):
assert type(self.res.names) is list # noqa: E721
for i, name in enumerate(self.names):
idx = self.res.get_eq_index(i)
idx2 = self.res.get_eq_index(name)
assert_equal(idx, i)
assert_equal(idx, idx2)
with pytest.raises(Exception):
self.res.get_eq_index('foo')
@pytest.mark.smoke
def test_repr(self):
# just want this to work
foo = str(self.res)
bar = repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
@pytest.mark.smoke
def test_cov_params(self):
# do nothing for now
self.res.cov_params
@pytest.mark.smoke
def test_cov_ybar(self):
self.res.cov_ybar()
@pytest.mark.smoke
def test_tstat(self):
self.res.tvalues
@pytest.mark.smoke
def test_pvalues(self):
self.res.pvalues
@pytest.mark.smoke
def test_summary(self):
summ = self.res.summary()
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
def test_aic(self):
assert_almost_equal(self.res.aic, self.ref.aic)
def test_bic(self):
assert_almost_equal(self.res.bic, self.ref.bic)
def test_hqic(self):
assert_almost_equal(self.res.hqic, self.ref.hqic)
def test_fpe(self):
assert_almost_equal(self.res.fpe, self.ref.fpe)
def test_lagorder_select(self):
ics = ['aic', 'fpe', 'hqic', 'bic']
for ic in ics:
res = self.model.fit(maxlags=10, ic=ic, verbose=True)
with pytest.raises(Exception):
self.model.fit(ic='foo')
def test_nobs(self):
assert_equal(self.res.nobs, self.ref.nobs)
def test_stderr(self):
assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
def test_loglike(self):
assert_almost_equal(self.res.llf, self.ref.loglike)
def test_ma_rep(self):
ma_rep = self.res.ma_rep(self.nahead)
assert_almost_equal(ma_rep, self.ref.ma_rep)
# --------------------------------------------------
# Lots of tests to make sure stuff works...need to check correctness
def test_causality(self):
causedby = self.ref.causality['causedby']
for i, name in enumerate(self.names):
variables = self.names[:i] + self.names[i + 1:]
result = self.res.test_causality(name, variables, kind='f')
assert_almost_equal(result.pvalue, causedby[i], DECIMAL_4)
rng = lrange(self.k)
rng.remove(i)
result2 = self.res.test_causality(i, rng, kind='f')
assert_almost_equal(result.pvalue, result2.pvalue, DECIMAL_12)
# make sure works
result = self.res.test_causality(name, variables, kind='wald')
# corner cases
_ = self.res.test_causality(self.names[0], self.names[1])
_ = self.res.test_causality(0, 1)
with pytest.raises(Exception):
self.res.test_causality(0, 1, kind='foo')
def test_causality_no_lags(self):
res = VAR(self.data).fit(maxlags=0)
with pytest.raises(RuntimeError, match="0 lags"):
res.test_causality(0, 1)
@pytest.mark.smoke
def test_select_order(self):
result = self.model.fit(10, ic='aic', verbose=True)
result = self.model.fit(10, ic='fpe', verbose=True)
# bug
model = VAR(self.model.endog)
model.select_order()
def test_is_stable(self):
# may not necessarily be true for other datasets
assert (self.res.is_stable(verbose=True))
def test_acf(self):
# test that it works...for now
acfs = self.res.acf(10)
# defaults to nlags=lag_order
acfs = self.res.acf()
assert (len(acfs) == self.p + 1)
def test_acf_2_lags(self):
c = np.zeros((2, 2, 2))
c[0] = np.array([[.2, .1], [.15, .15]])
c[1] = np.array([[.1, .9], [0, .1]])
acf = var_acf(c, np.eye(2), 3)
gamma = np.zeros((6, 6))
gamma[:2, :2] = acf[0]
gamma[2:4, 2:4] = acf[0]
gamma[4:6, 4:6] = acf[0]
gamma[2:4, :2] = acf[1].T
gamma[4:, :2] = acf[2].T
gamma[:2, 2:4] = acf[1]
gamma[:2, 4:] = acf[2]
recovered = np.dot(gamma[:2, 2:], np.linalg.inv(gamma[:4, :4]))
recovered = [recovered[:, 2 * i:2 * (i + 1)] for i in range(2)]
recovered = np.array(recovered)
assert_allclose(recovered, c, atol=1e-7)
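    # The check above is the Yule-Walker relation for a VAR(2): with Gamma(h)
    # the lag-h autocovariance, [A1 A2] = [Gamma(1) Gamma(2)] * inv(G), where
    # G is the block matrix built from Gamma(0) and Gamma(1); `recovered`
    # rebuilds the coefficient blocks that way and should match `c`.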
@pytest.mark.smoke
def test_acorr(self):
acorrs = self.res.acorr(10)
@pytest.mark.smoke
def test_forecast(self):
self.res.forecast(self.res.endog[-5:], 5)
@pytest.mark.smoke
def test_forecast_interval(self):
y = self.res.endog[:-self.p:]
point, lower, upper = self.res.forecast_interval(y, 5)
@pytest.mark.matplotlib
def test_plot_sim(self, close_figures):
self.res.plotsim(steps=100)
@pytest.mark.matplotlib
def test_plot(self, close_figures):
self.res.plot()
@pytest.mark.matplotlib
def test_plot_acorr(self, close_figures):
self.res.plot_acorr()
@pytest.mark.matplotlib
def test_plot_forecast(self, close_figures):
self.res.plot_forecast(5)
def test_reorder(self):
# manually reorder
data = self.data.view((float, 3), type=np.ndarray)
names = self.names
data2 = np.append(np.append(data[:, 2, None], data[:, 0, None], axis=1), data[:, 1, None], axis=1)
names2 = []
names2.append(names[2])
names2.append(names[0])
names2.append(names[1])
res2 = VAR(data2).fit(maxlags=self.p)
# use reorder function
res3 = self.res.reorder(['realinv', 'realgdp', 'realcons'])
# check if the main results match
assert_almost_equal(res2.params, res3.params)
assert_almost_equal(res2.sigma_u, res3.sigma_u)
assert_almost_equal(res2.bic, res3.bic)
assert_almost_equal(res2.stderr, res3.stderr)
def test_pickle(self):
fh = BytesIO()
# test wrapped results load save pickle
del self.res.model.data.orig_endog
self.res.save(fh)
fh.seek(0, 0)
res_unpickled = self.res.__class__.load(fh)
assert type(res_unpickled) is type(self.res) # noqa: E721
class E1_Results(object):
"""
Results from Lütkepohl (2005) using E2 dataset
"""
def __init__(self):
# Lutkepohl p. 120 results
# I asked the author about these results and there is probably rounding
# error in the book, so I adjusted these test results to match what is
# coming out of the Python (double-checked) calculations
self.irf_stderr = np.array([[[.125, 0.546, 0.664],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.129, 0.547, 0.663],
[0.032, 0.134, 0.163],
[0.026, 0.108, 0.131]],
[[0.084, .385, .479],
[.016, .079, .095],
[.016, .078, .103]]])
self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.149, 0.631, 0.764],
[0.044, 0.185, 0.224],
[0.033, 0.140, 0.169]],
[[0.099, .468, .555],
[.038, .170, .205],
[.033, .150, .185]]])
self.lr_stderr = np.array([[.134, .645, .808],
[.048, .230, .288],
[.043, .208, .260]])
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
path = resultspath + '%s.dat' % name
return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
files = ['e%d' % i for i in range(1, 7)]
for f in files:
get_lutkepohl_data(f)
class TestVARResultsLutkepohl(object):
"""
Verify calculations using results from Lütkepohl's book
"""
@classmethod
def setup_class(cls):
cls.p = 2
sdata, dates = get_lutkepohl_data('e1')
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
cls.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='BQ-MAR')
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(10)
cls.lut = E1_Results()
def test_approx_mse(self):
# 3.5.18, p. 99
mse2 = np.array([[25.12, .580, 1.300],
[.580, 1.581, .586],
[1.300, .586, 1.009]]) * 1e-4
assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
DECIMAL_3)
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
assert_almost_equal(np.round(irf_stderr[i], 3),
self.lut.irf_stderr[i - 1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
assert_almost_equal(np.round(stderr[i], 3),
self.lut.cum_irf_stderr[i - 1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
results = {
'c': 1,
'nc': 0,
'ct': 2,
'ctt': 3
}
for t, trendorder in results.items():
assert (util.get_trendorder(t) == trendorder)
def test_var_constant():
# see 2043
import datetime
from pandas import DataFrame, DatetimeIndex
series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])
data = DataFrame(series)
d = datetime.datetime.now()
delta = datetime.timedelta(days=1)
index = []
for i in range(data.shape[0]):
index.append(d)
d += delta
data.index = DatetimeIndex(index)
# with pytest.warns(ValueWarning): #does not silence warning in test output
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ValueWarning)
model = VAR(data)
with pytest.raises(ValueError):
model.fit(1)
def test_var_trend():
# see 2271
data = get_macrodata().view((float, 3), type=np.ndarray)
model = sm.tsa.VAR(data)
results = model.fit(4) # , trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend='nc')
with pytest.raises(ValueError):
model.fit(4, trend='t')
def test_irf_trend():
# test for irf with different trend see #1636
# this is a rough comparison by adding trend or subtracting mean to data
# to get similar AR coefficients and IRF
data = get_macrodata().view((float, 3), type=np.ndarray)
model = sm.tsa.VAR(data)
results = model.fit(4) # , trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend='nc')
irf_nc = results_nc.irf(10)
assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)
trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)
# for pandas version, currently not used, if data is a pd.DataFrame
# data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)
data_t = data + trend[:, None]
model_t = sm.tsa.VAR(data_t)
results_t = model_t.fit(4, trend='ct')
irf_t = results_t.irf(10)
assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)
class TestVARExtras(object):
@classmethod
def setup_class(cls):
mdata = sm.datasets.macrodata.load_pandas().data
mdata = mdata[['realgdp', 'realcons', 'realinv']]
data = mdata.values
data = np.diff(np.log(data), axis=0) * 400
cls.res0 = sm.tsa.VAR(data).fit(maxlags=2)
def test_process(self, close_figures):
res0 = self.res0
k_ar = res0.k_ar
fc20 = res0.forecast(res0.endog[-k_ar:], 20)
mean_lr = res0.mean()
assert_allclose(mean_lr, fc20[-1], rtol=5e-4)
ysim = res0.simulate_var(seed=987128)
assert_allclose(ysim.mean(0), mean_lr, rtol=0.1)
# initialization does not use long run intercept, see #4542
assert_allclose(ysim[0], res0.intercept, rtol=1e-10)
assert_allclose(ysim[1], res0.intercept, rtol=1e-10)
n_sim = 900
ysimz = res0.simulate_var(steps=n_sim, offset=np.zeros((n_sim, 3)),
seed=987128)
zero3 = np.zeros(3)
assert_allclose(ysimz.mean(0), zero3, atol=0.4)
# initialization does not use long run intercept, see #4542
assert_allclose(ysimz[0], zero3, atol=1e-10)
assert_allclose(ysimz[1], zero3, atol=1e-10)
# check attributes
assert_equal(res0.k_trend, 1)
assert_equal(res0.k_exog_user, 0)
assert_equal(res0.k_exog, 1)
assert_equal(res0.k_ar, 2)
irf = res0.irf()
@pytest.mark.matplotlib
def test_process_plotting(self, close_figures):
# Partially a smoke test
res0 = self.res0
k_ar = res0.k_ar
fc20 = res0.forecast(res0.endog[-k_ar:], 20)
irf = res0.irf()
res0.plotsim()
res0.plot_acorr()
fig = res0.plot_forecast(20)
fcp = fig.axes[0].get_children()[1].get_ydata()[-20:]
# Note values are equal, but keep rtol buffer
assert_allclose(fc20[:, 0], fcp, rtol=1e-13)
fcp = fig.axes[1].get_children()[1].get_ydata()[-20:]
assert_allclose(fc20[:, 1], fcp, rtol=1e-13)
fcp = fig.axes[2].get_children()[1].get_ydata()[-20:]
assert_allclose(fc20[:, 2], fcp, rtol=1e-13)
fig_asym = irf.plot()
fig_mc = irf.plot(stderr_type='mc', repl=1000, seed=987128)
for k in range(3):
a = fig_asym.axes[1].get_children()[k].get_ydata()
m = fig_mc.axes[1].get_children()[k].get_ydata()
# use m as desired because it is larger
# a is for some irf much smaller than m
assert_allclose(a, m, atol=0.1, rtol=0.9)
def test_forecast_cov(self):
        # forecast_cov can include parameter uncertainty if constant-only
res = self.res0
covfc1 = res.forecast_cov(3)
assert_allclose(covfc1, res.mse(3), rtol=1e-13)
# ignore warning, TODO: assert OutputWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
covfc2 = res.forecast_cov(3, method='auto')
assert_allclose(covfc2, covfc1, rtol=0.05)
# regression test, TODO: replace with verified numbers (Stata)
res_covfc2 = np.array([[[9.45802013, 4.94142038, 37.1999646],
[4.94142038, 7.09273624, 5.66215089],
[37.1999646, 5.66215089, 259.61275869]],
[[11.30364479, 5.72569141, 49.28744123],
[5.72569141, 7.409761, 10.98164091],
[49.28744123, 10.98164091, 336.4484723]],
[[12.36188803, 6.44426905, 53.54588026],
[6.44426905, 7.88850029, 13.96382545],
[53.54588026, 13.96382545, 352.19564327]]])
assert_allclose(covfc2, res_covfc2, atol=1e-6)
def test_exog(self):
# check that trend and exog are equivalent for basics and varsim
data = self.res0.model.endog
res_lin_trend = sm.tsa.VAR(data).fit(maxlags=2, trend='ct')
ex = np.arange(len(data))
res_lin_trend1 = sm.tsa.VAR(data, exog=ex).fit(maxlags=2)
ex2 = np.arange(len(data))[:, None] ** [0, 1]
res_lin_trend2 = sm.tsa.VAR(data, exog=ex2).fit(maxlags=2, trend='nc')
# TODO: intercept differs by 4e-3, others are < 1e-12
assert_allclose(res_lin_trend.params, res_lin_trend1.params, rtol=5e-3)
assert_allclose(res_lin_trend.params, res_lin_trend2.params, rtol=5e-3)
assert_allclose(res_lin_trend1.params, res_lin_trend2.params, rtol=1e-10)
y1 = res_lin_trend.simulate_var(seed=987128)
y2 = res_lin_trend1.simulate_var(seed=987128)
y3 = res_lin_trend2.simulate_var(seed=987128)
assert_allclose(y2.mean(0), y1.mean(0), rtol=1e-12)
assert_allclose(y3.mean(0), y1.mean(0), rtol=1e-12)
assert_allclose(y3.mean(0), y2.mean(0), rtol=1e-12)
h = 10
fc1 = res_lin_trend.forecast(res_lin_trend.endog[-2:], h)
exf = np.arange(len(data), len(data) + h)
fc2 = res_lin_trend1.forecast(res_lin_trend1.endog[-2:], h,
exog_future=exf)
with pytest.raises(ValueError, match="exog_future only has"):
wrong_exf = np.arange(len(data), len(data) + h // 2)
res_lin_trend1.forecast(res_lin_trend1.endog[-2:], h,
exog_future=wrong_exf)
exf2 = exf[:, None] ** [0, 1]
fc3 = res_lin_trend2.forecast(res_lin_trend2.endog[-2:], h,
exog_future=exf2)
assert_allclose(fc2, fc1, rtol=1e-12)
assert_allclose(fc3, fc1, rtol=1e-12)
assert_allclose(fc3, fc2, rtol=1e-12)
fci1 = res_lin_trend.forecast_interval(res_lin_trend.endog[-2:], h)
exf = np.arange(len(data), len(data) + h)
fci2 = res_lin_trend1.forecast_interval(res_lin_trend1.endog[-2:], h,
exog_future=exf)
exf2 = exf[:, None] ** [0, 1]
fci3 = res_lin_trend2.forecast_interval(res_lin_trend2.endog[-2:], h,
exog_future=exf2)
assert_allclose(fci2, fci1, rtol=1e-12)
assert_allclose(fci3, fci1, rtol=1e-12)
assert_allclose(fci3, fci2, rtol=1e-12)
@pytest.mark.parametrize('attr', ['y', 'ys_lagged'])
def test_deprecated_attributes_varresults(bivariate_var_result, attr):
with pytest.warns(FutureWarning):
getattr(bivariate_var_result, attr)
def test_var_cov_params_pandas(bivariate_var_data):
df = pd.DataFrame(bivariate_var_data, columns=['x', 'y'])
mod = VAR(df)
res = mod.fit(2)
cov = res.cov_params()
assert isinstance(cov, pd.DataFrame)
exog_names = ('const', 'L1.x', 'L1.y', 'L2.x', 'L2.y')
index = pd.MultiIndex.from_product((exog_names, ('x', 'y')))
assert_index_equal(cov.index, cov.columns)
assert_index_equal(cov.index, index)
def test_summaries_exog(reset_randomstate):
y = np.random.standard_normal((500, 6))
df = pd.DataFrame(y)
cols = (["endog_{0}".format(i) for i in range(2)] +
["exog_{0}".format(i) for i in range(4)])
df.columns = cols
df.index = pd.date_range('1-1-1950', periods=500, freq="MS")
endog = df.iloc[:, :2]
exog = df.iloc[:, 2:]
res = VAR(endog=endog, exog=exog).fit(maxlags=0)
summ = res.summary().summary
assert 'exog_0' in summ
assert 'exog_1' in summ
assert 'exog_2' in summ
assert 'exog_3' in summ
res = VAR(endog=endog, exog=exog).fit(maxlags=2)
summ = res.summary().summary
assert 'exog_0' in summ
assert 'exog_1' in summ
assert 'exog_2' in summ
assert 'exog_3' in summ
def test_whiteness_nlag(reset_randomstate):
# GH 6686
y = np.random.standard_normal((200, 2))
res = VAR(y).fit(maxlags=1, ic=None)
with pytest.raises(ValueError, match="The whiteness test can only"):
res.test_whiteness(1)
def test_var_maxlag(reset_randomstate):
y = np.random.standard_normal((22, 10))
VAR(y).fit(maxlags=None, ic="aic")
with pytest.raises(ValueError, match="maxlags is too large"):
VAR(y).fit(maxlags=8, ic="aic")
def test_from_formula():
with pytest.raises(NotImplementedError):
VAR.from_formula("y ~ x", None)
def test_correct_nobs():
# GH6748
mdata = sm.datasets.macrodata.load_pandas().data
# prepare the dates index
dates = mdata[['year', 'quarter']].astype(int).astype(str)
quarterly = dates["year"] + "Q" + dates["quarter"]
quarterly = dates_from_str(quarterly)
mdata = mdata[['realgdp', 'realcons', 'realinv']]
mdata.index = pd.DatetimeIndex(quarterly)
data = np.log(mdata).diff().dropna()
data_exog = pd.DataFrame(index=data.index)
data_exog['exovar1'] = np.random.normal(size=data_exog.shape[0])
# make a VAR model
model = VAR(endog=data, exog=data_exog)
results = model.fit(maxlags=1)
irf = results.irf_resim(orth=False, repl=100, steps=10, seed=1, burn=100, cum=False)
assert irf.shape == (100, 11, 3, 3)
| bsd-3-clause |
jwlockhart/concept-networks | examples/draw_tripartite.py | 1 | 3581 | # @author Jeff Lockhart <[email protected]>
# Script for drawing the tripartite network underlying analysis.
# version 1.0
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import sys
#add the parent directory to the current session's path
sys.path.insert(0, '../')
from network_utils import *
#read our cleaned up data
df = pd.read_csv('../data/sgm_stud/merged.tsv', sep='\t')
#The list of codes we're interested in.
code_cols = ['culture_problem',
#'culture_absent',
'culture_solution',
'culture_helpless',
'culture_victim',
'cishet_problem',
'cishet_victim',
'cishet_solution',
#'cishet_absent',
'cishet_helpless',
'sgm_victim',
'sgm_problem',
'sgm_helpless',
#'sgm_absent',
'sgm_solution',
'school_problem',
'school_solution',
#'school_absent',
'school_victim',
'school_helpless',
'community_problem',
'community_solution',
'community_helpless',
#'community_absent',
'community_victim']
#generate unique ID keys for each student and excerpt
def s_id(row):
return row['uni'] + str(row['Participant'])
def e_id(row):
return row['s_id'] + '-' + str(row['Start'])
df['s_id'] = df.apply(s_id, axis=1)
df['e_id'] = df.apply(e_id, axis=1)
#make a graph
g = nx.Graph()
#add all of our codes as nodes
for c in code_cols:
g.add_node(c, t='code')
#add each excerpt of text as a node. Connect it with relevant
#students and codes.
st = []
ex = []
last = ''
for row in df.iterrows():
#add the student node
g.add_node(row[1]['s_id'], t='student')
#if we haven't seen this student before, save the order we saw them in
if last != row[1]['s_id']:
last = row[1]['s_id']
st.append(last)
#add this excerpt node. Save its order to our list.
g.add_node(row[1]['e_id'], t='excerpt')
ex.append(row[1]['e_id'])
#add the edge joining this student and excerpt.
g.add_edge(row[1]['s_id'], row[1]['e_id'])
#for each code this excerpt has, draw an edge to it
for c in code_cols:
if row[1][c]:
g.add_edge(row[1]['e_id'], c)
#get a dictionary of our code nodes' labels
l = {}
for c in code_cols:
l[c] = c
#fix the positions of each node type in columns
pos = dict()
#space out the student and code nodes to align with excerpt column height
pos.update( (n, (1, i*5.57)) for i, n in enumerate(st) )
pos.update( (n, (2, i)) for i, n in enumerate(ex) )
pos.update( (n, (3, i*90)) for i, n in enumerate(code_cols) )
#make our figure big so we can see
plt.figure(figsize=(20,20))
#draw our nodes
nx.draw_networkx_nodes(g, pos, nodelist=st, node_color='r',
node_shape='^')
nx.draw_networkx_nodes(g, pos, nodelist=ex, node_color='b',
node_shape='o', alpha=0.5)
#draw our edges with low alpha so we can see
nx.draw_networkx_edges(g, pos, alpha=0.2)
#axes look silly
plt.axis('off')
#save the edges and nodes as one image
plt.savefig('../data/tripartite_unlabeled.png')
#save the labels for the codes as a different image
#this lets me edit them in with GIMP so that they're better positioned.
plt.figure(figsize=(20,20))
nx.draw_networkx_labels(g, pos, labels=l, font_size=20)
nx.draw_networkx_edges(g, pos, alpha=0)
plt.axis('off')
plt.savefig('../data/tripartite_labeles.png')
| gpl-3.0 |
sjperkins/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
NaturalHistoryMuseum/insect_analysis | vision/tests/scripts/example_wing_alignment.py | 2 | 7168 | import numpy as np
from skimage.feature import canny
from skimage.draw import set_color, polygon_perimeter
from vision.image_functions import threshold
from vision.segmentation.segment import saliency_dragonfly
from vision.tests import get_test_image
from vision.measurements import subspace_shape, procrustes
from skimage.measure import find_contours, regionprops, label
from skimage.transform import SimilarityTransform
import csv
import matplotlib.pyplot as plt
from skimage import draw
from sklearn.cluster import KMeans
import scipy
from operator import attrgetter
from skimage.morphology import skeletonize
from matplotlib import cm
from vision.io_functions import write_image
def visualize_modes(shape_model):
mu, phi, sigma2 = shape_model
K = phi.shape[1]
n = 10
colors = [cm.bone(i) for i in np.linspace(0.35, 1, n)]
for d in range(K):
plt.gca().set_color_cycle(colors)
for h_v in np.linspace(2, -2, n):
h = np.zeros((K, 1))
h[d] = h_v
s = mu + phi @ h
s = s.reshape(-1, 2)
plt.plot(s[:, 0], s[:, 1])
plt.savefig('mode{}.png'.format(d), transparent=True)
plt.clf()
def read_shape(index):
path = '/home/james/vision/vision/tests/test_data/wing_area/cropped/{}.csv'.format(index)
vertices = []
with open(path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=' ')
for row in reader:
if len(row) == 2:
vertices.append(row[:2])
return np.array(vertices, dtype=np.float)
def smoothed_shape(shape, iterations=3):
shape_smooth = shape
for iteration in range(iterations):
shape_smooth_old = shape_smooth
shape_smooth = np.zeros((2 * (shape_smooth_old.shape[0] - 1), 2))
shape_smooth[0, :] = shape_smooth_old[0, :]
shape_smooth[-1, :] = shape_smooth_old[-1, :]
for i in range(1, shape_smooth_old.shape[0] - 1):
shape_smooth[2 * i - 1, :] = 0.75 * shape_smooth_old[i, :] + 0.25 * shape_smooth_old[i - 1, :]
shape_smooth[2 * i, :] = 0.75 * shape_smooth_old[i, :] + 0.25 * shape_smooth_old[i + 1, :]
return shape_smooth
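# smoothed_shape is essentially Chaikin-style corner cutting: each interior
# vertex is replaced by two points lying a quarter of the way towards its two
# neighbours, so each iteration roughly doubles the number of vertices while
# rounding off sharp corners of the traced outline.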
def visualize_result(image, edge_image, shape, closest_points):
output_image = 0.5 * (image + edge_image[:, :, np.newaxis])
points = closest_points[:, [1, 0]]
perimeter = draw.polygon_perimeter(points[:, 0], points[:, 1])
draw.set_color(output_image, (perimeter[0].astype(np.int), perimeter[1].astype(np.int)), [0, 1, 0])
points = shape[:, [1, 0]]
perimeter = draw.polygon_perimeter(points[:, 0], points[:, 1])
draw.set_color(output_image, (perimeter[0].astype(np.int), perimeter[1].astype(np.int)), [0, 0, 1])
return output_image
shapes = [smoothed_shape(read_shape(i)) for i in range(4)]
aligned_shapes = procrustes.generalized_procrustes(shapes)
shape_model = subspace_shape.learn(aligned_shapes, K=8)
wings_image = get_test_image('wing_area', 'cropped', 'unlabelled', '7.png')
# write_image('wings.png', wings_image)
edges = canny(wings_image[:, :, 1], 3)
saliency = saliency_dragonfly(wings_image)
thresh = threshold(saliency)
background = threshold(scipy.ndimage.distance_transform_edt(~thresh))
contours = find_contours(thresh, level=0.5)
outline = max(contours, key=attrgetter('size')).astype(np.int)
outline_image = np.zeros_like(edges)
draw.set_color(outline_image, (outline[:, 0], outline[:, 1]), True)
edges = skeletonize(edges)
gaps = scipy.ndimage.filters.convolve(1 * edges, np.ones((3, 3)), mode='constant', cval=False)
edges[(gaps == 2) & ~edges] = True
edges = skeletonize(edges)
# write_image('wing_edge.png', edges)
distance = scipy.ndimage.distance_transform_edt(~edges)
labels = label(edges)
num_labels = np.max(labels)
edge_distance = np.zeros(num_labels + 1)
for i in range(num_labels + 1):
other_distance = scipy.ndimage.distance_transform_edt(~((labels > 0) & (labels != (i))))
edge_distance[i] = np.median(other_distance[labels == (i)])
regions = regionprops(labels)
edge_lengths = np.zeros_like(labels)
for i, edge in enumerate(sorted(regions, key=attrgetter('filled_area'))):
edge_lengths[labels == edge.label] = edge.filled_area
# write_image('labels.png', labels / labels.max())
edges = edge_lengths > 500
scores = edges.shape[0] * np.exp(-edge_lengths**4 / (8 * edges.shape[0]**4))
write_image('edges_wing.png', scores / scores.max())
kmeans = KMeans(n_clusters=8)
indices_vector = np.array(np.where(thresh)).T
saliency_vector = saliency[thresh].reshape(-1, 1)
distance_vector = distance[thresh].reshape(-1, 1)
color_vector = wings_image[thresh].reshape(-1, 3)
distance2 = np.copy(distance)
distance2[~thresh] = 0
# write_image('distance.png', distance2 / distance2.max())
thresh2 = threshold(distance2)
output_image = (0.5 + 0.5 * thresh2)[:, :, np.newaxis] * wings_image
# write_image('distance2.png', output_image)
wing_labels = label(thresh2)
regions = regionprops(wing_labels)
wings = sorted([r for r in regions if r.filled_area > 1000], key=attrgetter('filled_area'), reverse=True)
labels = np.zeros_like(wing_labels)
labels[background] = 1
for index, wing in enumerate(wings):
labels[wing_labels == wing.label] = index + 2
initial_rotation = np.zeros(3)
initial_scale = np.zeros(3)
initial_translation = np.zeros((3, 2))
for i, wing in enumerate(wings):
tform = SimilarityTransform(rotation=wing.orientation)
major = wing.major_axis_length * 1.125
minor = wing.minor_axis_length * 1.125
initial_scale[i] = 2 * np.sqrt(np.power(major / 2, 2) + np.power(minor / 2, 2))
initial_rotation[i] = -wing.orientation
initial_translation[i, :] = wing.centroid
coords = np.array([[-(minor / 2), -(major / 2)],
[-(minor / 2), (major / 2)],
[(minor / 2), (major / 2)],
[(minor / 2), -(major / 2)]])
rotated_coords = tform(coords) + wing.centroid
box_coords = polygon_perimeter(rotated_coords[:, 0], rotated_coords[:, 1])
set_color(wings_image, box_coords, [0, 0, 1])
# write_image('distance_box.png', wings_image)
slices = [slice(13, -2)] + [slice(start, None) for start in range(13)[::-1]]
# slices = [slice(None)]
inference = subspace_shape.infer(edges,
edge_lengths,
*shape_model,
update_slice=slices[0],
scale_estimate=initial_scale[0],
rotation=initial_rotation[0],
translation=initial_translation[0, [1, 0]])
fitted_shape_old = np.zeros_like(shape_model[0].reshape(-1, 2))
inference.send(None)
for i, s in enumerate(slices):
for iteration in range(100):
fitted_shape, closest_edge_points = inference.send(s)
print((np.power(fitted_shape - fitted_shape_old, 2).sum(axis=1).mean()))
fitted_shape_old = fitted_shape
if iteration % 50 == 0:
output_image = visualize_result(wings_image, edges, fitted_shape, closest_edge_points)
write_image('wings_template_slice_{}_iteration_{}.png'.format(i, iteration), output_image)
| gpl-2.0 |
PrashntS/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
rgommers/statsmodels | statsmodels/sandbox/km_class.py | 31 | 11748 | #a class for the Kaplan-Meier estimator
from statsmodels.compat.python import range
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
def __init__(self, data, timesIn, groupIn, censoringIn):
raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
#store the inputs
self.data = data
self.timesIn = timesIn
self.groupIn = groupIn
self.censoringIn = censoringIn
def fit(self):
#split the data into groups based on the predicting variable
#get a set of all the groups
groups = list(set(self.data[:,self.groupIn]))
#create an empty list to store the data for different groups
groupList = []
#create an empty list for each group and add it to groups
for i in range(len(groups)):
groupList.append([])
#iterate through all the groups in groups
for i in range(len(groups)):
#iterate though the rows of dataArray
for j in range(len(self.data)):
#test if this row has the correct group
if self.data[j,self.groupIn] == groups[i]:
#add the row to groupList
groupList[i].append(self.data[j])
#create an empty list to store the times for each group
timeList = []
#iterate through all the groups
for i in range(len(groupList)):
#create an empty list
times = []
#iterate through all the rows of the group
for j in range(len(groupList[i])):
#get a list of all the times in the group
times.append(groupList[i][j][self.timesIn])
#get a sorted set of the times and store it in timeList
times = list(sorted(set(times)))
timeList.append(times)
#get a list of the number at risk and events at each time
#create an empty list to store the results in
timeCounts = []
#create an empty list to hold points for plotting
points = []
#create a list for points where censoring occurs
censoredPoints = []
        #iterate through each group
for i in range(len(groupList)):
#initialize a variable to estimate the survival function
survival = 1
#initialize a variable to estimate the variance of
#the survival function
varSum = 0
#initialize a counter for the number at risk
riskCounter = len(groupList[i])
#create a list for the counts for this group
counts = []
##create a list for points to plot
x = []
y = []
#iterate through the list of times
for j in range(len(timeList[i])):
if j != 0:
if j == 1:
#add an indicator to tell if the time
#starts a new group
groupInd = 1
#add (0,1) to the list of points
x.append(0)
y.append(1)
#add the point time to the right of that
x.append(timeList[i][j-1])
y.append(1)
#add the point below that at survival
x.append(timeList[i][j-1])
y.append(survival)
#add the survival to y
y.append(survival)
else:
groupInd = 0
#add survival twice to y
y.append(survival)
y.append(survival)
#add the time twice to x
x.append(timeList[i][j-1])
x.append(timeList[i][j-1])
#add each censored time, number of censorings and
#its survival to censoredPoints
censoredPoints.append([timeList[i][j-1],
censoringNum,survival,groupInd])
#add the count to the list
counts.append([timeList[i][j-1],riskCounter,
eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#increment the number at risk
riskCounter += -1*(riskChange)
#initialize a counter for the change in the number at risk
riskChange = 0
#initialize a counter to zero
eventCounter = 0
                #initialize a counter to tell when censoring occurs
censoringCounter = 0
censoringNum = 0
#iterate through the observations in each group
for k in range(len(groupList[i])):
                    #check if the observation has the given time
if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
#increment the number at risk counter
riskChange += 1
#check if this is an event or censoring
if groupList[i][k][self.censoringIn] == 1:
#add 1 to the counter
eventCounter += 1
else:
censoringNum += 1
#check if there are any events at this time
if eventCounter != censoringCounter:
censoringCounter = eventCounter
#calculate the estimate of the survival function
survival *= ((float(riskCounter) -
eventCounter)/(riskCounter))
try:
#calculate the estimate of the variance
varSum += (eventCounter)/((riskCounter)
*(float(riskCounter)-
eventCounter))
except ZeroDivisionError:
varSum = 0
#append the last row to counts
counts.append([timeList[i][len(timeList[i])-1],
riskCounter,eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#add the last time once to x
x.append(timeList[i][len(timeList[i])-1])
x.append(timeList[i][len(timeList[i])-1])
#add the last survival twice to y
y.append(survival)
#y.append(survival)
censoredPoints.append([timeList[i][len(timeList[i])-1],
censoringNum,survival,1])
            #add the list for the group to a list for all the groups
timeCounts.append(np.array(counts))
points.append([x,y])
#returns a list of arrays, where each array has as it columns: the time,
#the number at risk, the number of events, the estimated value of the
#survival function at that time, and the estimated standard error at
#that time, in that order
self.results = timeCounts
self.points = points
self.censoredPoints = censoredPoints
def plot(self):
x = []
#iterate through the groups
for i in range(len(self.points)):
#plot x and y
plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
#create lists of all the x and y values
x += self.points[i][0]
for j in range(len(self.censoredPoints)):
            #check if censoring is occurring
if (self.censoredPoints[j][1] != 0):
#if this is the first censored point
if (self.censoredPoints[j][3] == 1) and (j == 0):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this censored point starts a new group
elif ((self.censoredPoints[j][3] == 1) and
(self.censoredPoints[j-1][3] == 1)):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is the last censored point
elif j == (len(self.censoredPoints) - 1):
#calculate a distance beyond the previous time
#so that all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is a point in the middle of the group
else:
                    #calculate a distance beyond the current time
#to place the point, so they all fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j+1][0])
- self.censoredPoints[j][0]))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#set the size of the plot so it extends to the max x and above 1 for y
plt.xlim((0,np.max(x)))
plt.ylim((0,1.05))
#label the axes
plt.xlabel('time')
plt.ylabel('survival')
plt.show()
def show_results(self):
#start a string that will be a table of the results
resultsString = ''
#iterate through all the groups
for i in range(len(self.results)):
#label the group and header
resultsString += ('Group {0}\n\n'.format(i) +
'Time At Risk Events Survival Std. Err\n')
for j in self.results[i]:
#add the results to the string
resultsString += (
'{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
print(resultsString)
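# Illustrative usage sketch (hypothetical column layout; note that __init__
# above intentionally raises RuntimeError and redirects users to the newer
# implementation in survival2.py, so this is documentation only):
#   data: 2-D array whose columns hold time, group label and an event flag
#   (1 = event observed, otherwise censored); then
#   km = KAPLAN_MEIER(data, timesIn=0, groupIn=1, censoringIn=2)
#   km.fit(); km.plot(); km.show_results()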
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/tests/test_backend_svg.py | 7 | 3551 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from io import BytesIO
import xml.parsers.expat
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from matplotlib.testing.decorators import image_comparison
@cleanup
def test_visibility():
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(0, 4 * np.pi, 50)
y = np.sin(x)
yerr = np.ones_like(y)
a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')
for artist in b:
artist.set_visible(False)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read()
fd.close()
parser = xml.parsers.expat.ParserCreate()
parser.Parse(buf) # this will raise ExpatError if the svg is invalid
@image_comparison(baseline_images=['fill_black_with_alpha'], remove_text=True,
extensions=['svg'])
def test_fill_black_with_alpha():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x=[0, 0.1, 1], y=[0, 0, 0], c='k', alpha=0.1, s=10000)
@image_comparison(baseline_images=['noscale'], remove_text=True)
def test_noscale():
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(Z, cmap='gray')
plt.rcParams['svg.image_noscale'] = True
@cleanup
def test_composite_images():
#Test that figures can be saved with and without combining multiple images
#(on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with BytesIO() as svg:
fig.savefig(svg, format="svg")
svg.seek(0)
buff = svg.read()
assert buff.count(six.b('<image ')) == 1
plt.rcParams['image.composite_image'] = False
with BytesIO() as svg:
fig.savefig(svg, format="svg")
svg.seek(0)
buff = svg.read()
assert buff.count(six.b('<image ')) == 2
@cleanup
def test_text_urls():
fig = plt.figure()
test_url = "http://test_text_urls.matplotlib.org"
fig.suptitle("test_text_urls", url=test_url)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read().decode()
fd.close()
expected = '<a xlink:href="{0}">'.format(test_url)
assert expected in buf
@image_comparison(baseline_images=['bold_font_output'], extensions=['svg'])
def test_bold_font_output():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
@image_comparison(baseline_images=['bold_font_output_with_none_fonttype'],
extensions=['svg'])
def test_bold_font_output_with_none_fonttype():
plt.rcParams['svg.fonttype'] = 'none'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
nhejazi/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
laszlocsomor/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 3 | 11899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
    if os.path.isfile(train_file_path):
      print("Loading training data from file: %s" % train_file_path)
    else:
      urllib.urlretrieve(train_data_url, train_file_path)
    # Open after the (possible) download so train_file is always defined.
    train_file = open(train_file_path)
    test_file_path = os.path.join(data_dir, "adult.test")
    if os.path.isfile(test_file_path):
      print("Loading test data from file: %s" % test_file_path)
    else:
      urllib.urlretrieve(test_data_url, test_file_path)
    # Open only after any download has completed.
    test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
    feature_cols = dict(list(continuous_cols.items()) +
                        list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
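  # TF_CONFIG advertises a (partly faked) cluster spec, i.e. the requested
  # number of parameter servers and this worker's index, so that the
  # RunConfig created below picks up the distributed-training topology.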
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the census data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
andybrnr/QuantEcon.py | examples/paths_and_hist.py | 3 | 1056 |
import numpy as np
import matplotlib.pyplot as plt
from quantecon import LinearStateSpace
import random
phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5
sigma = 0.1
A = [[phi_1, phi_2, phi_3, phi_4],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]
C = [[sigma], [0], [0], [0]]
G = [1, 0, 0, 0]
T = 30
ar = LinearStateSpace(A, C, G, mu_0=np.ones(4))
ymin, ymax = -0.8, 1.25
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
for ax in axes:
ax.grid(alpha=0.4)
ax = axes[0]
ax.set_ylim(ymin, ymax)
ax.set_ylabel(r'$y_t$', fontsize=16)
ax.vlines((T,), -1.5, 1.5)
ax.set_xticks((T,))
ax.set_xticklabels((r'$T$',))
sample = []
for i in range(20):
rcolor = random.choice(('c', 'g', 'b', 'k'))
x, y = ar.simulate(ts_length=T+15)
y = y.flatten()
ax.plot(y, color=rcolor, lw=1, alpha=0.5)
ax.plot((T,), (y[T],), 'ko', alpha=0.5)
sample.append(y[T])
y = y.flatten()
axes[1].set_ylim(ymin, ymax)
axes[1].hist(sample, bins=16, normed=True, orientation='horizontal', alpha=0.5)
plt.show()
| bsd-3-clause |
winklerand/pandas | pandas/tests/indexes/period/test_period.py | 1 | 26361 | import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
offsets, compat)
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10),
index_dec=period_range('20130101', periods=10,
freq='D')[::-1])
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
pytest.raises(TypeError, idx.astype, str)
pytest.raises(TypeError, idx.astype, float)
pytest.raises(TypeError, idx.astype, 'timedelta64')
pytest.raises(TypeError, idx.astype, 'timedelta64[ns]')
def test_pickle_compat_construction(self):
pass
def test_pickle_round_trip(self):
for freq in ['D', 'M', 'A']:
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([NaT] + i[1:].tolist(), freq='D')
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
tm.assert_index_equal(res, exp)
assert res.freqstr == 'D'
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_asobject_like(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
def test_contains_nat(self):
# see gh-13582
idx = period_range('2007-01', freq='M', periods=10)
assert pd.NaT not in idx
assert None not in idx
assert float('nan') not in idx
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert pd.NaT in idx
assert None in idx
assert float('nan') in idx
assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
tm.assert_index_equal(index.end_time, expected_index)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT',
'2011-05'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_ndarray_compat_properties(self):
if compat.is_platform_32bit():
pytest.skip("skipping on 32bit")
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
def test_numpy_repeat(self):
index = period_range('20010101', periods=2)
expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
Period('2001-01-02'), Period('2001-01-02')])
tm.assert_index_equal(np.repeat(index, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, index, 2, axis=1)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == 'M'
def test_map(self):
# test_map_dictlike generally tests
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('how', ['outer', 'inner', 'left', 'right'])
def test_join_self(self, how):
index = period_range('1/1/2000', periods=10)
joined = index.join(index, how=how)
assert index is joined
def test_insert(self):
# GH 18295 (test missing)
expected = PeriodIndex(
['2017Q1', pd.NaT, '2017Q2', '2017Q3', '2017Q4'], freq='Q')
for na in (np.nan, pd.NaT, None):
result = period_range('2017Q1', periods=4, freq='Q').insert(1, na)
tm.assert_index_equal(result, expected)
| bsd-3-clause |
chetan51/NAB | nab/scripts/optimize_threshold.py | 1 | 7512 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
helpString = """ This script takes a batch of csvs and generates an ROC curve
given the csvs using steps between thresholds of 0.0 and 1.0. Finally it will
find the point on that ROC curve which minimizes the average cost over
multiple user profiles. """
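# Typical invocation (illustrative; the flags are defined in __main__ below):
#   python optimize_threshold.py -d results/<detector_dir> --config benchmark_config.yaml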
import os
import pandas
import numpy
import csv
from optparse import OptionParser
from helpers import (sharedSetup,
getCSVFiles,
getDetailedResults,
genConfusionMatrix)
def optimizeThreshold(options):
"""
Generate ROC curve and find optimum point given cost matrix. Results of
this analysis will be put into a best and a summary results file.
"""
# Setup
config, profiles, dataGroupDirs, detector = sharedSetup(options)
# Files to loop over
csvFiles = getCSVFiles(dataGroupDirs, "raw")
# Thresholds
threshMin = 0.0
threshMax = 1.0
initialThreshStep = .1
# Accumulate results
header = None
resultsSummary = []
# Loop over all specified results files
for resultsFile in csvFiles:
print "Analyzing results file %s ..." % resultsFile
with open(resultsFile, "r") as fh:
results = pandas.read_csv(fh)
for profileName, profile in profiles.iteritems():
costMatrix = profile["CostMatrix"]
vals = genCurveData(results,
threshMin,
threshMax,
initialThreshStep,
profile["ScoringWindow"],
costMatrix,
options.verbosity)
# First time through write out the headers
if not header:
header = ["Name", "User Profile"]
thresholds = vals["thresholds"]
header.extend(thresholds)
resultsSummary.append(header)
# Add a row for each file processed
resultRow = [resultsFile, profileName]
resultRow.extend(vals["costs"])
resultsSummary.append(resultRow)
costs = vals["costs"]
costsArray = numpy.array(costs)
# Sum all values
resultsSummaryArray = numpy.array(resultsSummary)
# Skip first row and two columns
summaryView = resultsSummaryArray[1:,2:].astype("float")
# Summarize data for file writing
totalsArray = numpy.sum(summaryView, axis=0)
totalsList = totalsArray.tolist()
lowestCost = totalsArray.min()
minSummaryCostIndices = numpy.where(totalsArray == lowestCost)[0].tolist()
bestThresholds = [thresholds[ind] for ind in minSummaryCostIndices]
# Re-run all files with lowest "best" threshold
minThresh = bestThresholds[0]
csvType = "raw"
detailedResults = getDetailedResults(csvType, csvFiles, profiles)
costIndex = detailedResults[0].index("Cost")
# Write out detailed results
detailedResultsArray = numpy.array(detailedResults)
# Skip first row and two columns
detailedView = detailedResultsArray[1:,2:].astype("float")
# Summarize data for file writing
detailedTotalsArray = numpy.sum(detailedView, axis=0)
detailedTotalsList = detailedTotalsArray.tolist()
detailedOutput = os.path.join(options.resultsDir,
"optimizationBestResults.csv")
with open(detailedOutput, "w") as outFile:
writer = csv.writer(outFile)
writer.writerows(detailedResults)
totalsRow = ["Totals", ""]
totalsRow.extend(detailedTotalsList)
writer.writerow(totalsRow)
# Write out summary results
outputFile = os.path.join(options.resultsDir, "optimizationSummary.csv")
with open(outputFile, "w") as outFile:
writer = csv.writer(outFile)
writer.writerows(resultsSummary)
totalsRow = ["Totals", ""]
totalsRow.extend(totalsList)
writer.writerow(totalsRow)
# Console output
print "#" * 70
print "YOUR RESULTS"
print "Detector: ", detector
print "Minimum cost:",
print lowestCost
print "Best thresholds:"
for thresh in bestThresholds:
print "\t" + str(thresh)
print "Summary file for all thresholds:", outputFile
print "Detailed summary file for the best threshold:", detailedOutput
def genCurveData(results,
minThresh = 0.0,
maxThresh = 1.0,
step = .1,
window = 30,
costMatrix = None,
verbosity = 0):
"""
Returns a dict containing lists of data for plotting
  results - pandas DataFrame of raw detector results (must contain
  "anomaly_score" and "label" columns)
minThresh - Where to start our threshold search
maxThresh - Where to stop the threshold search (inclusive)
step - The increment size between each threshold test. This will be
varied during the run to increase resolution near 1.0.
"""
vals = {"tprs": [],
"fprs": [],
"thresholds": [],
"costs": []}
incrementCount = 1.0
while minThresh < maxThresh and incrementCount < 60:
cMatrix = genConfusionMatrix(results,
"anomaly_score",
"label",
window,
5,
costMatrix,
threshold = minThresh,
verbosity = verbosity)
vals["tprs"].append(cMatrix.tpr)
vals["fprs"].append(cMatrix.fpr)
vals["thresholds"].append(minThresh)
vals["costs"].append(cMatrix.cost)
minThresh, step = updateThreshold(minThresh, step, incrementCount)
incrementCount += 1.0
return vals
def updateThreshold(thresh, step, incrementCount):
"""
One method of updating our threshold to generate the ROC curve. Here as soon
as we reach .9 we begin decreasing the threshold increment in a logarithmic
fashion, asymptotically approaching 1
"""
thresh += step
# Decrease step as we approach 1
if incrementCount % 9 == 0:
step /= 10.0
return thresh, step
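# For reference, with the defaults used above (start 0.0, step 0.1) the
# thresholds visited are 0.0, 0.1, ..., 0.8, then 0.9, 0.91, ... because the
# step is divided by 10 after every ninth increment, concentrating the
# search near 1.0.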
if __name__ == "__main__":
# All the command line options
parser = OptionParser(helpString)
parser.add_option("-d", "--resultsDir",
help="Path to results files. Single detector only!")
parser.add_option("--verbosity", default=0, help="Increase the amount and "
"detail of output by setting this greater than 0.")
parser.add_option("--config", default="benchmark_config.yaml",
help="The configuration file to use while running the "
"benchmark.")
parser.add_option("--profiles", default="user_profiles.yaml",
help="The configuration file to use while running the "
"benchmark.")
options, args = parser.parse_args()
# Main
optimizeThreshold(options) | gpl-3.0 |
DavidPowell/OpenModes | test/test_basis.py | 1 | 4550 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonantors
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import openmodes
from openmodes.basis import DivRwgBasis, LoopStarBasis
from openmodes.integration import DunavantRule
from openmodes import Simulation
from openmodes.visualise import write_vtk
from helpers import read_2d_real, write_2d_real
from numpy.testing import assert_allclose
tests_location = osp.split(__file__)[0]
mesh_dir = osp.join(tests_location, 'input', 'test_basis')
reference_dir = osp.join(tests_location, 'reference', 'test_basis')
def test_interpolate_rwg(plot=False, write_reference=False, skip_asserts=False):
"Interpolate an RWG basis function over a triangle"
sim = Simulation()
srr = sim.load_mesh(osp.join(mesh_dir, 'SRR.msh'))
basis = DivRwgBasis(srr)
rwg_function = np.zeros(len(basis), np.float64)
rwg_function[20] = 1
rule = DunavantRule(10)
r, basis_func = basis.interpolate_function(rwg_function, rule)
if write_reference:
# save reference data
write_2d_real(osp.join(reference_dir, 'rwg_r.txt'), r)
write_2d_real(osp.join(reference_dir, 'rwg_basis_func.txt'),
basis_func)
r_ref = read_2d_real(osp.join(reference_dir, 'rwg_r.txt'))
basis_func_ref = read_2d_real(osp.join(reference_dir,
'rwg_basis_func.txt'))
if not skip_asserts:
assert_allclose(r, r_ref, rtol=1e-6)
assert_allclose(basis_func, basis_func_ref, rtol=1e-6)
if plot:
plt.figure(figsize=(6, 6))
plt.quiver(r[:, 0], r[:, 1], basis_func[:, 0], basis_func[:, 1],
scale=5e4)
plt.show()
def test_interpolate_loop_star(plot=False, write_reference=False,
skip_asserts=False):
"Interpolate loop and star basis functions"
sim = Simulation()
mesh = sim.load_mesh(osp.join(tests_location, 'input', 'test_basis',
'rectangle.msh'))
basis = LoopStarBasis(mesh)
ls_function = np.zeros(len(basis), np.float64)
# chose one loop and one star
star_basis = 28
loop_basis = 4
ls_function[star_basis] = 1
ls_function[loop_basis] = 1
rule = DunavantRule(10)
r, basis_func = basis.interpolate_function(ls_function, rule)
the_basis = basis[star_basis]
plus_nodes = mesh.nodes[basis.mesh.polygons[the_basis.tri_p,
the_basis.node_p]]
minus_nodes = mesh.nodes[basis.mesh.polygons[the_basis.tri_m,
the_basis.node_m]]
if write_reference:
# save reference data
write_2d_real(osp.join(reference_dir, 'loop_star_r.txt'), r)
write_2d_real(osp.join(reference_dir, 'loop_star_basis_func.txt'),
basis_func)
r_ref = read_2d_real(osp.join(reference_dir, 'loop_star_r.txt'))
basis_func_ref = read_2d_real(osp.join(reference_dir,
'loop_star_basis_func.txt'))
if not skip_asserts:
assert_allclose(r, r_ref, rtol=1e-6)
assert_allclose(basis_func, basis_func_ref, rtol=1e-6)
if plot:
plt.figure(figsize=(6, 6))
plt.quiver(r[:, 0], r[:, 1], basis_func[:, 0], basis_func[:, 1],
pivot='middle')
plt.plot(plus_nodes[:, 0], plus_nodes[:, 1], 'x')
plt.plot(minus_nodes[:, 0], minus_nodes[:, 1], '+')
plt.show()
if __name__ == "__main__":
test_interpolate_rwg(plot=True)#, skip_asserts=True)
test_interpolate_loop_star(plot=True) #, skip_asserts=True)
| gpl-3.0 |
SaTa999/pyPanair | examples/batch_analysis/batch_analysis.py | 1 | 3827 | #!/usr/bin/env python
from concurrent import futures
from logging import getLogger, StreamHandler, FileHandler, Formatter, INFO, DEBUG
import multiprocessing
import os
from shutil import copy2, rmtree
from subprocess import Popen, PIPE
import pandas as pd
from make_wgs_aux import main as mkwgsaux
# initialize logger
logger = getLogger(__name__)
shandler = StreamHandler()
shandler.setFormatter(Formatter("%(asctime)s %(levelname)s: %(message)s"))
fhandler = FileHandler(filename="batch_analysis.log")
fhandler.setFormatter(Formatter("%(asctime)s %(levelname)s: %(message)s"))
shandler.setLevel(INFO)
fhandler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(shandler)
logger.addHandler(fhandler)
def run_panin(directory, aux):
process = Popen("./panin", stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=directory)
aux = str.encode(aux)
stdout, stderr = process.communicate(aux)
if stderr:
logger.error("\n".join((directory, str(stderr))))
else:
logger.debug("\n".join((directory, str(stdout))))
def run_panair(directory):
process = Popen("./panair", stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=directory)
stdout, stderr = process.communicate(b"a502.in")
if stderr:
logger.error("\n".join((directory, str(stderr))))
else:
logger.debug("\n".join((directory, str(stdout))))
try:
with open(os.path.join(directory, "panair.err")) as f:
if "PanAir is stopping." in f.read():
logger.critical("\n".join((directory, "fatal error with PanAir")))
except FileNotFoundError:
pass
def safe_makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
logger.error("could not create directory at {}".format(path))
def copy2dir(files, olddir, newdir):
safe_makedirs(newdir)
if type(files) is str:
files = (files, )
for f in files:
try:
copy2("{0}/{1}".format(olddir, f), "{0}/{1}".format(newdir, f))
except FileNotFoundError as e:
logger.error(e)
def run_analysis(casenum, aux, analysis_dir, params):
# create directory to run panin and panair
procid = int(multiprocessing.current_process().pid)
logger.info("calculating case{} with procid {}".format(casenum, procid))
target_dir = os.path.join(analysis_dir, "panair{}".format(procid))
safe_makedirs(target_dir)
# run panin and panair
mkwgsaux(target_dir=target_dir, **params)
run_panin(target_dir, aux)
run_panair(target_dir)
# save results
files_to_save = ("panair.out", "ffmf", "agps", "panair.err", "panin.dbg", "a502.in")
newdir = os.path.join("results", "case{}".format(casenum))
copy2dir(files_to_save, target_dir, newdir)
# delete intermediate files
rmtree(target_dir)
if __name__ == '__main__':
# set variables
N_PROCS = 3
AUXNAME = "ADODG_case3.aux"
ANALYSIS_DIR = "" # directory to run analysis (intermediate files will be stored here)
logger.info("start batch analysis")
# read caselist
caselist = pd.read_csv("caselist.csv")
with futures.ProcessPoolExecutor(max_workers=N_PROCS) as executor:
# submit jobs
fs = list()
for _, case in caselist.iterrows():
casenum = int(case["casenum"])
params = case[2:].to_dict()
if case["run"] == 1:
fs.append(executor.submit(run_analysis, casenum, AUXNAME, ANALYSIS_DIR, params))
else:
logger.debug("skipping case{}".format(casenum))
continue
# run analysis
futures.wait(fs, return_when=futures.ALL_COMPLETED)
logger.info("finish batch analysis")
| mit |
jmetzen/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 of
dataset X and of dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
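# (Added illustration, not part of the original example.) Besides the scores,
# the fitted model exposes the estimated weight vectors; assuming the usual
# scikit-learn attributes ``x_weights_`` and ``y_weights_`` of shape
# (n_features, n_components), the first direction of covariance in each block
# can be inspected directly:
print("X weights (1st component)")
print(np.round(plsca.x_weights_[:, 0], 2))
print("Y weights (1st component)")
print(np.round(plsca.y_weights_[:, 0], 2))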
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
gnilson/Wellbore-Profile-Metrics | wellbore_explorer.py | 1 | 13983 |
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import arange
from numpy import linspace
from numpy import array
from numpy import where
from numpy import polyfit
from numpy import poly1d
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
import numpy.random
import math
import csv
from scipy import interpolate
from scipy import cluster
# CALCULATE:
# Average inclination
# Straitness Index
#
well_list = []
well_names = [] # well name <--> UWI
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.xdata = []
self.ydata = []
self.dev_data = []
self.md_data = []
#self.textbox.setText('1 2 3 4')
self.on_draw()
def on_press(self):
return
def well_selected(self, ind):
print "Well Selected", well_list[ind.row()], well_names[well_list[ind.row()]]
self.xdata = master_dict[well_names[well_list[ind.row()]]]['Deviation N/S']
self.ydata = master_dict[well_names[well_list[ind.row()]]]['TV Depth']
self.dev_data = array(master_dict[well_names[well_list[ind.row()]]]['Deviation Angle'])
self.md_data = master_dict[well_names[well_list[ind.row()]]]['Measured Depth']
self.on_draw()
return
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ A demo of using PyQt with matplotlib:
* Use the matplotlib navigation bar
* Add values to the text box and press Enter (or click "Draw")
* Show or hide the grid
* Drag the slider to modify the width of the bars
* Save the plot to a file using the File menu
* Click on a bar to receive an informative message
"""
QMessageBox.about(self, "About the demo", msg.strip())
def sort_by_straitness(self):
fd = open('out.csv','w')
print "API, STRAITNESS RATIO, ANGLE"
fd.write("API, STRAITNESS RATIO, ANGLE\n")
for well in master_dict.keys():
self.xdata = master_dict[well]['Deviation N/S']
self.ydata = master_dict[well]['TV Depth']
self.dev_data = array(master_dict[well]['Deviation Angle'])
self.md_data = master_dict[well]['Measured Depth']
if self.xdata:
try:
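                    # (Descriptive comment added for clarity.) k-means with 3
                    # clusters on the deviation-angle survey separates the
                    # vertical/build/lateral sections; the cluster that contains
                    # the last survey point is treated as the lateral. The
                    # "straitness" ratio then compares the chord length of a
                    # straight line fitted to that section against the arc
                    # length of a cubic spline over the same measured-depth
                    # interval (larger values mean a less straight wellbore).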
centroid, label = cluster.vq.kmeans2(self.dev_data, 3)
ct = centroid[label[-1]]
x0 = where(self.dev_data >= ct)[0][0]
#x0 = where(self.dev_data > 70)[0][0]
targ_x = self.xdata[x0:]
targ_y = self.ydata[x0:]
lin_int = poly1d(polyfit(targ_x, targ_y, 1))
xs = [self.xdata[x0], self.xdata[-1]]
out = lin_int(xs)
stlen = ((out[-1]-out[0])**2.0 + (xs[-1]-xs[0])**2.0)**0.5
angle = -1*math.atan((out[-1]-out[0])/(abs(xs[-1]-xs[0]))*360/(2*math.pi))
tcknot, u = interpolate.splprep([self.xdata, self.ydata],
u=self.md_data,
s=0,
k=3)
mds = linspace(min(u), max(u), 5000)
out = interpolate.splev(mds, tcknot)
mds = linspace(self.md_data[x0], self.md_data[-1], 5000)
out = interpolate.splev(mds, tcknot)
splen = 0.0
for i in arange(0, len(out[0])-1):
splen += ((out[0][i+1] - out[0][i])**2.0 + (out[1][i+1] - out[1][i])**2.0) ** 0.5
print "%s, %s, %s" %(well, (1-stlen/splen)*1000, angle)
fd.write("%s, %s, %s\n" %(well, (1-stlen/splen)*1000, angle))
except SystemError:
print "%s, ERROR, ERROR" % (well,)
fd.write("%s, ERROR, ERROR\n" % (well,))
pass
fd.close()
print "WELLBORE DATA WRITTEN TO out.csv"
return
def on_draw(self):
""" Redraws the figure
"""
print "on_draw"
self.axes.clear()
#self.title.set_text(self.wellname)
self.axes.set_picker(1)
self.axes.set_color_cycle(['b','g','r','c','m','y','k','w','#A6CEE3','#1F78B4','#B2DF8A','#CAB2D6','#FB9A99','#E31A1C','#FF7F00','#CAB2D6','#FFFF99'])
#self.axes.set_yscale('log')
#self.axes.set_xscale('log')
self.axes.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
#self.axes.set_ylim(0.01, 10000)
#axes.set_xlim(0, 3000)
self.axes.set_xlabel("X (ft)")
self.axes.set_ylabel("Y (ft)")
self.axes.yaxis.grid(True, which="both", ls='-', color='#C0C0C0')
self.axes.xaxis.grid(True, which="both", ls='-', color='#C0C0C0')
self.axes.grid(True)
if self.xdata:
#############
#K-MEANS
centroid, label = cluster.vq.kmeans2(self.dev_data, 3)
ct = centroid[label[-1]]
#x0 = where(label == label[-1])[0][0]
#############
x0 = where(self.dev_data >= ct)[0][0]
targ_x = self.xdata[x0:]
targ_y = self.ydata[x0:]
lin_int = poly1d(polyfit(targ_x, targ_y, 1))
xs = [self.xdata[x0], self.xdata[-1]]
out = lin_int(xs)
self.axes.plot(xs, out, '-', color='red')
stlen = ((out[-1]-out[0])**2.0 + (xs[-1]-xs[0])**2.0)**0.5
print "STRAIT_LENGTH", stlen
angle = -1*math.atan((out[-1]-out[0])/(abs(xs[-1]-xs[0]))*360/(2*math.pi))
tcknot, u = interpolate.splprep([self.xdata, self.ydata],
u=self.md_data,
s=0,
k=3)
mds = linspace(min(u), max(u), 5000)
out = interpolate.splev(mds, tcknot)
self.axes.plot(out[0], out[1], '-')
mds = linspace(self.md_data[x0], self.md_data[-1], 5000)
out = interpolate.splev(mds, tcknot)
splen = 0.0
for i in arange(0, len(out[0])-1):
splen += ((out[0][i+1] - out[0][i])**2.0 + (out[1][i+1] - out[1][i])**2.0) ** 0.5
print "SPLINE_LENGTH", splen
self.axes.plot(out[0], out[1], '-')
print out[0][0], out[0][-1], out[1][0], out[1][-1]
print "RATIO", (1-stlen/splen)*1000
print "ANGLE", angle
# the larger the worse
#self.axes.plot(self.xdata, self.ydata, '.', color='green')
self.canvas.draw()
return
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = plt.figure(facecolor="white")
#self.title = self.fig.suptitle(self.wellname)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111, axisbelow=True)
plt.gca().invert_yaxis()
#self.bfaxes = self.fig.add_subplot(122, axisbelow=True)
# Bind the 'pick' event for clicking on one of the bars
#
#self.canvas.mpl_connect('pick_event', self.on_pick)
#self.canvas.mpl_connect('button_press_event', self.on_press)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
#self.textbox = QLineEdit()
self.lspinbox = QDoubleSpinBox()
self.lspinbox.setMinimum(0.01)
self.lspinbox.setMaximum(1)
self.lspinbox.setSingleStep(0.1)
self.lspinbox.setValue(0.1)
self.DOFPspinbox = QSpinBox()
self.DOFPspinbox.setMinimum(0)
self.DOFPspinbox.setMaximum(100)
self.DOFPspinbox.setSingleStep(1)
self.DOFPspinbox.setValue(0)
self.connect(self.DOFPspinbox, SIGNAL('valueChanged (int)'), self.on_press)
#self.textbox.setMinimumWidth(200)
# self.connect(self.textbox, SIGNAL('editingFinished ()'), self.on_draw)
# self.connect(self.lspinbox, SIGNAL('valueChanged (double)'), self.on_draw)
self.draw_button = QPushButton("Sort Alphabetically")
self.connect(self.draw_button, SIGNAL('clicked()'), self.on_press)
self.reset_button = QPushButton("Sort by Straitness")
self.connect(self.reset_button, SIGNAL('clicked()'), self.sort_by_straitness)
self.bf_sens = QPushButton("Sort by X")
self.connect(self.bf_sens, SIGNAL('clicked()'), self.on_press)
self.calc_bf = QCheckBox("Export Data")
self.calc_bf.setChecked(False)
self.connect(self.calc_bf, SIGNAL('stateChanged(int)'), self.on_press)
self.well_listview = QListView()
self.listmodel = QStandardItemModel(self.well_listview)
#self.well_list.sort()
for well in well_list:
item = QStandardItem()
item.setText(well)
item.setEditable(False)
self.listmodel.appendRow(item)
self.well_listview.setModel(self.listmodel)
self.connect(self.well_listview, SIGNAL('doubleClicked(QModelIndex)'), self.well_selected)
#slider_label = QLabel('Bar width (%):')
#self.slider = QSlider(Qt.Horizontal)
#self.slider.setRange(1, 100)
#self.slider.setValue(20)
#self.slider.setTracking(True)
#self.slider.setTickPosition(QSlider.TicksBothSides)
#self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
#
# Layout with box sizers
#
lhbox1 = QHBoxLayout()
lhbox1.addWidget(QLabel("Bourdet Derrivative L:"))
lhbox1.addWidget(self.lspinbox)
lhbox2 = QHBoxLayout()
lhbox2.addWidget(QLabel("Offset DOFP:"))
lhbox2.addWidget(self.DOFPspinbox)
vboxtools = QVBoxLayout()
vboxtools.addLayout(lhbox1)
vboxtools.addLayout(lhbox2)
for w in [ self.draw_button, self.reset_button, self.bf_sens, self.calc_bf]:
vboxtools.addWidget(w)
vboxtools.setAlignment(w, Qt.AlignVCenter)
lrtoolsbox = QHBoxLayout()
lrtoolsbox.addLayout(vboxtools)
lrtoolsbox.addWidget(self.well_listview)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(lrtoolsbox)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("Double click on a well name")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def extend_dict(d1, d2):
dnew = {}
if len(d1) == 0:
for k, v in d2.items():
dnew[k] = [v,]
else:
for k, v in d2.items():
dnew[k] = d1[k] + [v]
return dnew
if __name__ == "__main__":
fd = open("./IHS Raw Data/Directional_Surveys.csv", 'rb')
reader = csv.DictReader(fd)
uwi = 0
newuwi = 0
master_dict = {}
tmp_dict = {}
well_names = {}
lastrow = []
for row in reader:
newuwi = int(row['UWI'])
if uwi == newuwi or not uwi:
tmp_dict = extend_dict(tmp_dict, row)
elif len(tmp_dict):
well_names[lastrow['Well Name'] + ' ' + lastrow['Well Num']] = uwi
master_dict[uwi] = tmp_dict.copy()
tmp_dict = {}
lastrow = row
uwi = newuwi
fd.close()
for k in master_dict.keys():
welldata = master_dict[k]
# strings to floats
for j in master_dict[k].keys():
try:
if ['Deviation Azimuth',
'Deviation E/W',
'Deviation N/S',
'TV Depth',
'Measured Depth',
'Deviation Angle',
'UWI'].index(j):
welldata[j] = map(float, welldata[j])
except ValueError:
pass
for i in arange(0, len(welldata['Deviation E/W'])):
if welldata['E/W'][i] == 'W':
welldata['Deviation E/W'][i] *= -1
if welldata['N/S'][i] == 'S':
welldata['Deviation N/S'][i] *= -1
#api = master_dict.keys()[40]
#xdata = master_dict[api]['Deviation N/S']
#ydata = master_dict[api]['TV Depth']
well_list = well_names.keys()
well_list.sort()
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
| bsd-3-clause |
aminert/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
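        # (Illustrative note, not in the original.) For this 0/1 target the
        # Brier score is simply the mean squared difference between predicted
        # probability and outcome, i.e. np.mean((prob_pos - y_test) ** 2).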
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
CompPhysics/MachineLearning | doc/src/How2ReadData/knn.py | 2 | 1603 | import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
import mglearn
X, y = mglearn.datasets.make_forge()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train, y_train)
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',metric_params=None, n_jobs=1, n_neighbors=3, p=2,weights='uniform')
clf.predict(X_test)
clf.score(X_test, y_test)
fig, axes = plt.subplots(1, 3, figsize=(10, 3))
for n_neighbors, ax in zip([1, 3, 9], axes):
clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)
ax.scatter(X[:, 0], X[:, 1], c=y, s=60, cmap=mglearn.cm2)
ax.set_title("%d neighbor(s)" % n_neighbors)
plt.figure()  # start a new figure so the regression plot does not reuse the last KNN subplot
data = np.loadtxt('src/Hudson_Bay.csv', delimiter=',', skiprows=1)
x = data[:,0]
y = data[:,1]
#x_train, y_train = train_test_split(x, y, random_state=0)
line = np.linspace(1900,1930,1000,endpoint=False).reshape(-1,1)
reg = DecisionTreeRegressor(min_samples_split=3).fit(x.reshape(-1,1),y.reshape(-1,1))
plt.plot(line, reg.predict(line), label="decision tree")
regline = LinearRegression().fit(x.reshape(-1,1),y.reshape(-1,1))
plt.plot(line, regline.predict(line), label= "Linear Regression")
plt.plot(x, y, label= "Linear Regression")
plt.show()
| cc0-1.0 |
kayhayen/Nuitka | tests/library/compile_extension_modules.py | 1 | 4408 | #!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This test runner compiles all extension modules for standalone mode.
This is a test to reveal hidden dependencies on a system.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import shutil
import tempfile
from nuitka.tools.testing.Common import (
check_output,
checkRuntimeLoadedFilesForOutsideAccesses,
checkSucceedsWithCPython,
compileLibraryTest,
createSearchMode,
getRuntimeTraceOfLoadedFiles,
my_print,
setup,
test_logger,
)
setup(needs_io_encoding=True)
search_mode = createSearchMode()
tmp_dir = tempfile.gettempdir()
# Try to avoid RAM disk /tmp and use the disk one instead.
if tmp_dir == "/tmp" and os.path.exists("/var/tmp"):
tmp_dir = "/var/tmp"
done = set()
def decide(root, filename):
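    # (Descriptive comment added for clarity; the reasons are inferred, not
    # documented here.) Skip Cython helpers, anything under matplotlib, debug
    # builds ("linux-gnu_d.so") and msgpack, and compile each extension module
    # basename only once.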
if os.path.sep + "Cython" + os.path.sep in root:
return False
if (
root.endswith(os.path.sep + "matplotlib")
or os.path.sep + "matplotlib" + os.path.sep in root
):
return False
if filename.endswith("linux-gnu_d.so"):
return False
if root.endswith(os.path.sep + "msgpack"):
return False
first_part = filename.split(".")[0]
if first_part in done:
return False
done.add(first_part)
return filename.endswith((".so", ".pyd")) and not filename.startswith("libpython")
current_dir = os.path.normpath(os.getcwd())
current_dir = os.path.normcase(current_dir)
def action(stage_dir, root, path):
command = [
sys.executable,
os.path.join("..", "..", "bin", "nuitka"),
"--stand",
"--run",
"--output-dir",
stage_dir,
"--remove-output",
"--plugin-enable=pylint-warnings",
]
filename = os.path.join(stage_dir, "importer.py")
assert path.startswith(root)
module_name = path[len(root) + 1 :]
module_name = module_name.split(".")[0]
module_name = module_name.replace(os.path.sep, ".")
with open(filename, "w") as output:
output.write("import " + module_name + "\n")
output.write("print('OK')")
command += os.environ.get("NUITKA_EXTRA_OPTIONS", "").split()
command.append(filename)
if checkSucceedsWithCPython(filename):
try:
output = check_output(command).splitlines()
except Exception: # only trying to check for no exception, pylint: disable=try-except-raise
raise
else:
assert os.path.exists(filename[:-3] + ".dist")
loaded_filenames = getRuntimeTraceOfLoadedFiles(
logger=test_logger,
path=os.path.join(filename[:-3] + ".dist", "importer.exe"),
)
outside_accesses = checkRuntimeLoadedFilesForOutsideAccesses(
loaded_filenames,
[filename[:-3] + ".dist", current_dir, os.path.expanduser("~/.config")],
)
if output[-1] != b"OK":
sys.exit("FAIL")
my_print("OK")
assert not outside_accesses, outside_accesses
shutil.rmtree(filename[:-3] + ".dist")
else:
my_print("SKIP (does not work with CPython)")
compileLibraryTest(
search_mode=search_mode,
stage_dir=os.path.join(tmp_dir, "compile_extensions"),
decide=decide,
action=action,
)
my_print("FINISHED, all extension modules compiled.")
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/seaborn/matrix.py | 2 | 45299 | """Functions to visualize matrices of data."""
import itertools
import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap, relative_luminance
__all__ = ["heatmap", "clustermap"]
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.colorConverter.to_rgb
if isinstance(colors, pd.DataFrame):
# Convert dataframe
return pd.DataFrame({col: colors[col].map(to_rgb)
for col in colors})
elif isinstance(colors, pd.Series):
return colors.map(to_rgb)
else:
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
        if not (mask.index.equals(data.index) and
                mask.columns.equals(data.columns)):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
        # Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
# Reverse the rows so the plot looks like the matrix
plot_data = plot_data[::-1]
data = data.iloc[::-1]
mask = mask.iloc[::-1]
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int) and xticklabels > 1:
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is True:
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is False:
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int) and yticklabels > 1:
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is True:
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is False:
yticklabels = []
else:
yticklabels = yticklabels[::-1]
# Get the positions and used label for the ticks
nx, ny = data.T.shape
if xticklabels == []:
self.xticks = []
self.xticklabels = []
else:
xstart, xend, xstep = 0, nx, xtickevery
self.xticks = np.arange(xstart, xend, xstep) + .5
self.xticklabels = xticklabels[xstart:xend:xstep]
if yticklabels == []:
self.yticks = []
self.yticklabels = []
else:
ystart, yend, ystep = (ny - 1) % ytickevery, ny, ytickevery
self.yticks = np.arange(ystart, yend, ystep) + .5
self.yticklabels = yticklabels[ystart:yend:ystep]
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Sort out the annotations
if annot is None:
annot = False
annot_data = None
elif isinstance(annot, bool):
if annot:
annot_data = plot_data
else:
annot_data = None
else:
try:
annot_data = annot.values[::-1]
except AttributeError:
annot_data = annot[::-1]
if annot.shape != plot_data.shape:
raise ValueError('Data supplied to "annot" must be the same '
'shape as the data to plot.')
annot = True
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
mesh.update_scalarmappable()
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, m, color, val in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors(),
self.annot_data.flat):
if m is not np.ma.masked:
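                # (Descriptive comment added for clarity.) Choose dark text on
                # light cells and white text on dark cells, based on the cell
                # color's relative luminance against a fixed threshold.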
l = relative_luminance(color)
text_color = ".15" if l > .408 else "w"
annotation = ("{:" + self.fmt + "}").format(val)
text_kwargs = dict(color=text_color, ha="center", va="center")
text_kwargs.update(self.annot_kws)
ax.text(x, y, annotation, **text_kwargs)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
ax.set(xticks=self.xticks, yticks=self.yticks)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
# If rasterized is passed to pcolormesh, also rasterize the
# colorbar to avoid white lines on the PDF rendering
if kws.get('rasterized', False):
cb.solids.set_rasterized(True)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
"""Plot rectangular data as a color-encoded matrix.
This function tries to infer a good colormap to use from the data, but
this is not guaranteed to work, so take care to make sure the kind of
colormap (sequential or diverging) and its limits are appropriate.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
one of these values may be ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either a cubehelix map (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool or rectangular dataset, optional
If True, write the data value in each cell. If an array-like with the
same shape as ``data``, then use this to annotate the heatmap instead
of the raw data.
fmt : string, optional
String formatting code to use when adding annotations.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
xticklabels : list-like, int, or bool, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
n label.
yticklabels : list-like, int, or bool, optional
If True, plot the row names of the dataframe. If False, don't plot
the row names. If list-like, plot these alternate labels as the
yticklabels. If an integer, use the index names but plot only every
n label.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["January", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
Use a mask to plot only part of a matrix
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels,
yticklabels, mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
self.dependent_coord = self.dendrogram['dcoord']
self.independent_coord = self.dendrogram['icoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")', UserWarning)
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
        This is a separate function rather than a property so that the
        dendrogram is not recalculated every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
line_kwargs = dict(linewidths=.5, colors='k')
if self.rotate and self.axis == 0:
lines = LineCollection([list(zip(x, y))
for x, y in zip(self.dependent_coord,
self.independent_coord)],
**line_kwargs)
else:
lines = LineCollection([list(zip(x, y))
for x, y in zip(self.independent_coord,
self.dependent_coord)],
**line_kwargs)
ax.add_collection(lines)
number_of_leaves = len(self.reordered_ind)
max_dependent_coord = max(map(max, self.dependent_coord))
if self.rotate:
ax.yaxis.set_ticks_position('right')
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_ylim(0, number_of_leaves * 10)
ax.set_xlim(0, max_dependent_coord * 1.05)
ax.invert_xaxis()
ax.invert_yaxis()
else:
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_xlim(0, number_of_leaves * 10)
ax.set_ylim(0, max_dependent_coord * 1.05)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
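    A minimal usage sketch (assuming ``data`` is a rectangular DataFrame)::
        plotter = dendrogram(data, axis=1)
        data_reordered = data.iloc[:, plotter.reordered_ind]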
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
self.mask = _matrix_mask(self.data2d, mask)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
self.row_colors, self.row_color_labels = \
self._preprocess_colors(data, row_colors, axis=0)
self.col_colors, self.col_color_labels = \
self._preprocess_colors(data, col_colors, axis=1)
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
axisbg="white")
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
axisbg="white")
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for scale to left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def _preprocess_colors(self, data, colors, axis):
"""Preprocess {row/col}_colors to extract labels and convert colors."""
labels = None
if colors is not None:
if isinstance(colors, (pd.DataFrame, pd.Series)):
# Ensure colors match data indices
if axis == 0:
colors = colors.ix[data.index]
else:
colors = colors.ix[data.columns]
# Replace na's with background color
colors = colors.fillna('white')
# Extract color values and labels from frame/series
if isinstance(colors, pd.DataFrame):
labels = list(colors.columns)
colors = colors.T.values
else:
labels = [colors.name]
colors = colors.values
colors = _convert_colors(colors)
return colors, labels
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
specified axis.
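        A quick sanity check (illustrative, not a doctest): after
        ``ClusterGrid.z_score(df, axis=0)`` every row of the result has
        (approximately) zero mean and unit standard deviation, using the
        pandas (ddof=1) definition of the standard deviation.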
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
vmin : int
If 0, then subtract the minimum of the data before dividing by
the range.
Returns
-------
standardized : pandas.DataFrame
            Normalized data rescaled so that the values range from 0 to 1
            across the specified axis.
>>> import numpy as np
>>> d = np.arange(5, 8, 0.5)
>>> ClusterGrid.standard_scale(d)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, xind, yind, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
heatmap_kws : dict
Keyword arguments heatmap
"""
# Remove any custom colormap and centering
kws = kws.copy()
kws.pop('cmap', None)
kws.pop('center', None)
kws.pop('vmin', None)
kws.pop('vmax', None)
kws.pop('xticklabels', None)
kws.pop('yticklabels', None)
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, yind, axis=0)
# Get row_color labels
if self.row_color_labels is not None:
row_color_labels = self.row_color_labels
else:
row_color_labels = False
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=row_color_labels, yticklabels=False, **kws)
# Adjust rotation of labels
if row_color_labels is not False:
plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)
else:
despine(self.ax_row_colors, left=True, bottom=True)
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, xind, axis=1)
# Get col_color labels
if self.col_color_labels is not None:
col_color_labels = self.col_color_labels
else:
col_color_labels = False
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=col_color_labels, **kws)
# Adjust rotation of labels, place on right side
if col_color_labels is not False:
self.ax_col_colors.yaxis.tick_right()
plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)
else:
despine(self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, xind, yind, **kws):
self.data2d = self.data2d.iloc[yind, xind]
self.mask = self.mask.iloc[yind, xind]
# Try to reorganize specified tick labels, if provided
xtl = kws.pop("xticklabels", True)
try:
xtl = np.asarray(xtl)[xind]
except (TypeError, IndexError):
pass
ytl = kws.pop("yticklabels", True)
try:
ytl = np.asarray(ytl)[yind]
except (TypeError, IndexError):
pass
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=self.mask,
xticklabels=xtl, yticklabels=ytl, **kws)
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a hierarchically clustered heatmap of a pandas DataFrame
Parameters
----------
data: pandas.DataFrame
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
figsize: tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like or pandas DataFrame/Series, optional
List of colors to label for either the rows or columns. Useful to
evaluate whether samples within a group are clustered together. Can
use nested lists or DataFrame for multiple color levels of labeling.
If given as a DataFrame or Series, labels for the colors are extracted
from the DataFrames column names or from the name of the Series.
DataFrame/Series colors are also matched to the data by their
index, ensuring colors are drawn in the correct order.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked. Only used for
visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
    To access the reordered column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
Examples
--------
Plot a clustered heatmap:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> g = sns.clustermap(flights)
Don't cluster one of the axes:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, col_cluster=False)
Use a different colormap and add lines to separate the cells:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
>>> g = sns.clustermap(flights, cmap=cmap, linewidths=.5)
Use a different figure size:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, cmap=cmap, figsize=(7, 5))
Standardize the data across the columns:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, standard_scale=1)
Normalize the data across the rows:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, z_score=0)
Use a different clustering method:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, method="single", metric="cosine")
Add colored labels on one of the axes:
.. plot::
:context: close-figs
>>> season_colors = (sns.color_palette("BuPu", 3) +
... sns.color_palette("RdPu", 3) +
... sns.color_palette("YlGn", 3) +
... sns.color_palette("OrRd", 3))
>>> g = sns.clustermap(flights, row_colors=season_colors)
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale,
mask=mask)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
**kwargs)
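# --- Editor's note: illustrative usage sketch, not part of seaborn itself. ---
# The Notes section above recommends the grid's own ``savefig`` so that the
# dendrograms are not clipped; a minimal, hypothetical call would be:
#
#     g = clustermap(flights)
#     g.savefig("flights_clustermap.png")   # forwards bbox_inches='tight'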
| mit |
jorgemauricio/INIFAP_Course | ejercicios/ej_21_concatenate.py | 1 | 1167 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
# libraries
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# create a matrix
arr1 = np.arange(9).reshape((3,3))
# display it
arr1
# concatenate along axis 1
np.concatenate([arr1,arr1],axis=1)
# concatenate along axis 0
np.concatenate([arr1,arr1],axis=0)
# pandas example
# create two non-overlapping series
ser1 = Series([0,1,2],index=['T','U','V'])
ser2 = Series([3,4],index=['X','Y'])
# concatenate the series (the default is axis=0)
pd.concat([ser1,ser2])
# concatenating along axis 1 produces a DataFrame
pd.concat([ser1,ser2],axis=1)
# we can specify which index labels to use
pd.concat([ser1,ser2],axis=1,join_axes=[['U','V','Y']])
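# editor's note (assumption about newer pandas): join_axes was removed in
# pandas >= 1.0; reindexing after the concat gives an equivalent result
pd.concat([ser1, ser2], axis=1).reindex(['U', 'V', 'Y'])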
# create two DataFrames
dframe1 = DataFrame(np.random.randn(4,3), columns=['X', 'Y', 'Z'])
dframe2 = DataFrame(np.random.randn(3, 3), columns=['Y', 'Q', 'X'])
# concatenate the DataFrames
pd.concat([dframe1,dframe2])
# if the index is not important we can ignore it with the ignore_index attribute
pd.concat([dframe1,dframe2],ignore_index=True) | mit |
davidgbe/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
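# --- Editor's illustrative sketch (assumption: not part of the original test
# suite); it only runs when this file is executed directly. ---
if __name__ == "__main__":
    # Draw 3 distinct integers from range(10), mirroring the calls tested above.
    print(sample_without_replacement(n_population=10, n_samples=3,
                                     random_state=0))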
| bsd-3-clause |
veragluscevic/strings | strings/covariances.py | 1 | 7111 | import matplotlib.pyplot as plt
import numpy as np
import os, os.path, glob, shutil
import sys,re
import cPickle as pickle
import logging
from .simulator import obtain_N_cmb_maps
from .statistic import Statistic
from .statistic import PDF, PowerSpectrum, Moments
from .convolve import generate_rotated_stick_convolutions
from .mapset import MapSet_Group
FWHM = 1.4
NOISE = 16.
SPECTRA_FILE = '/data/verag/strings/inputs/lensedCls.dat'
STATS_ROOT = '/data/verag/strings/stats'
pdf_defaults={
'binmin':{'gradgrad':-5e8,
'grad':0,
'gradgrad_rotstick':3e8,
'grad_rotstick':8e4,
'map':-500},
'binmax':{'gradgrad':2e9,
'grad':5e5,
'gradgrad_rotstick':7e8,
'grad_rotstick':2e5,
'map':500}}
def compute_largemap_stat(statnumber, whichmap='gradgrad_rotstick',
statname='pdf',
map_fov_deg=72., fwhm=1.4, noise=16.,
Nx=10240, Ny=10240,
Gmu=0., strings=False, string_file_num=0,
name='large_cmb',
stats_kwargs=None,
restag=None, returnres=False,saveres=True):
"""fwhm is in arcmin
"""
calc_grad = False
calc_gradgrad = False
calc_rotstick = False
if whichmap=='grad':
calc_grad = True
if whichmap=='gradgrad':
calc_gradgrad = True
if whichmap=='gradgrad_rotstick':
calc_rotstick = True
if whichmap=='grad_rotstick':
calc_rotstick = True
name += '{}'.format(statnumber)
msg = MapSet_Group(N=1,
calc_grad=calc_grad, calc_gradgrad=calc_gradgrad,
calc_rotstick=calc_rotstick,
name=name, strings=strings,
recalc=True,
noise=noise,
fwhm=fwhm, string_file_num=string_file_num,
map_fov_deg=map_fov_deg,
Gmu=Gmu, Nx=Nx, Ny=Ny)
newmap = getattr(msg, whichmap)
if stats_kwargs is None:
stats_kwargs = {}
if statname=='pdf':
for prop in ['binmin', 'binmax']:
if prop not in stats_kwargs:
stats_kwargs[prop] = pdf_defaults[prop][whichmap]
if 'bins' not in stats_kwargs:
stats_kwargs['bins'] = 110
if 'normed' not in stats_kwargs:
stats_kwargs['normed'] = False#True
stat = PDF(**stats_kwargs)
newmap.apply_statistic(stat)
res = newmap.statistics[statname][0]
print newmap.statistics[statname][0]
del msg, newmap
if saveres:
if restag is None:
restag = '{:.1f}deg2_fwhm{:.1f}_{:.0f}uK'.format(map_fov_deg,fwhm,noise)
if strings:
restag += '_Gmu{:.1e}_stringfile{}'.format(Gmu,string_file_num)
resfile = STATS_ROOT + '/{}_{}{}_{}.npy'.format(whichmap,statname,statnumber,restag)
np.save(resfile, res)
if returnres:
return res
def compute_stats_batch(Nstart=0, Nmaps=10, Nstringfiles=100,
strings=True, Gmu=1.4e-7,
whichmap='gradgrad_rotstick', statname='pdf',
noise=NOISE, fwhm=FWHM,
combine_method='max',
enlarge_side_factor=10):
if strings:
Nx = 1024
Ny = 1024
map_fov_deg = 7.2
else:
Nx = 1024 * enlarge_side_factor
Ny = 1024 * enlarge_side_factor
map_fov_deg = 7.2 * enlarge_side_factor
Nend = Nstart + Nmaps
count = 0
for j,num in enumerate(np.arange(Nstart,Nend)):
if strings:
for i in np.arange(Nstringfiles):
count += 1
print 'calculating for {} (map{},string{})/{}...'.format(count,j,i,Nmaps*Nstringfiles)
compute_largemap_stat(num, whichmap=whichmap,
statname=statname,
map_fov_deg=map_fov_deg, fwhm=fwhm, noise=noise,
Nx=Nx, Ny=Ny,
Gmu=Gmu, strings=strings, string_file_num=i,
name='large_stringy',
stats_kwargs=None,
restag=None, returnres=False,saveres=True)
else:
print 'calculating {}/{}...'.format(j+1,Nmaps)
compute_largemap_stat(num, whichmap=whichmap,
statname=statname,
map_fov_deg=map_fov_deg, fwhm=fwhm, noise=noise,
Nx=Nx, Ny=Ny,
Gmu=0., strings=False,
name='large',
stats_kwargs=None,
restag=None, returnres=False,saveres=True)
def sigma_Gmu(Gmu, h1m, h2m, h1Sm, h2Sm, check=False):
h1 = h1m - h1m.mean()
h2 = h2m - h2m.mean()
h1S = h1Sm - h1m.mean()
h2S = h2Sm - h2m.mean()
covh = np.zeros((2,2))
covh_inv = np.zeros((2,2))
covh[0,0] = (h1 * h1).mean() - h1.mean() * h1.mean()
covh[1,1] = (h2 * h2).mean() - h2.mean() * h2.mean()
covh[0,1] = (h1 * h2).mean() - h1.mean() * h2.mean()
covh[1,0] = covh[0,1]
corr_coeff = covh[0,1]/(covh[1,1]*covh[0,0])**0.5
if np.isclose(np.abs(corr_coeff),1):
res=1./(h1S**2/covh[0,0])**0.5
print res
return res
det_covh = covh[0,0]*covh[1,1] - covh[0,1]**2
covh_inv[0,0] = covh[1,1]/det_covh
covh_inv[1,1] = covh[0,0]/det_covh
covh_inv[0,1] = -covh[0,1]/det_covh
covh_inv[1,0] = -covh[1,0]/det_covh
det_covh = covh[0,0]*covh[1,1] - covh[0,1]**2
sigma2_inv = h1S**2 * covh_inv[0,0] + h2S**2 * covh_inv[1,1] + 2.*h1S*h2S * covh_inv[0,1]
res = 1./sigma2_inv**0.5
if check:
det_covh_inv = covh_inv[0,0]*covh_inv[1,1] - covh_inv[0,1]**2
print '\ndet={}, 1/det_inv={}\n'.format(det_covh,1./det_covh_inv)
print 'cov:'
print '{} {}'.format(covh[0,0],covh[0,1])
print '{} {}\n'.format(covh[1,0],covh[1,1])
print 'inv(cov):'
print '{} {}'.format(covh_inv[0,0],covh_inv[0,1])
print '{} {}\n'.format(covh_inv[1,0],covh_inv[1,1])
print '<h1>={}'.format(h1.mean())
print '<h2>={}'.format(h2.mean())
print '<h1h2>={}'.format((h1*h2).mean())
print '<h1^2>={}'.format((h1*h1).mean())
print '<h2^2>={}\n'.format((h2*h2).mean())
print '<h1^2>-<h1>^2={}'.format((h1*h1).mean()-h1.mean()**2)
print '<h2^2>-<h2>^2={}'.format((h2*h2).mean()-h2.mean()**2)
print '<h1h2>-<h1><h2>={}\n'.format((h1*h2).mean()-h1.mean()*h2.mean())
print 'h1S={}'.format(h1S)
print 'h2S={}'.format(h2S)
return res/Gmu
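# --- Editor's sketch (assumption: equivalent numpy formulation, not used by the
# code above, and assuming scalar string-template values h1Sm/h2Sm). The 2x2
# algebra in sigma_Gmu is the quadratic form sigma^{-2} = hS^T C^{-1} hS with
# C the population covariance of (h1, h2). ---
def sigma_Gmu_np(Gmu, h1m, h2m, h1Sm, h2Sm):
    h1 = h1m - h1m.mean()
    h2 = h2m - h2m.mean()
    hS = np.array([h1Sm - h1m.mean(), h2Sm - h2m.mean()])
    C = np.cov(np.vstack([h1, h2]), bias=True)  # matches the <..> averages above
    sigma2_inv = hS.dot(np.linalg.solve(C, hS))
    return 1. / np.sqrt(sigma2_inv) / Gmu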
| mit |
ysasaki6023/NeuralNetworkStudy | cifar05/StopManager.py | 4 | 3055 | import numpy as np
import pandas as pd
import time
class StopManager:
def __init__(self):
self.Accuracy = []
self.Time = []
self.Epoch = []
self.MaxEpoch = None
self.MinEpoch = 10
self.LookBackRatioForP1 = 0.2 # Look back 20% of epoch to calculate P1
self.AverageNum = 10
self.Threshold = 3e-4
return
def __str__(self):
return "StopManager: Threshold = %.1f%% / 100epoch, MinEpoch = %d, MaxEpoch = %d" % (self.Threshold*100*100,self.MinEpoch,self.MaxEpoch)
def GetInfo(self):
params = self.GetAI()
if np.isnan(params["AIrate"]):
return ""
else:
return ("Current: Accuracy = %.1f%%, Epoch = %.0f, Improvement = +%.1f%% / 100 epoch ( = %.0f min ), Stop threshold = %.1f%% / 100epoch\n"%(params["Current:Accuracy"]*100,params["Current:Epoch"],params["AIrate"]*100*100,params["EpochTime"]*100/60.,self.Threshold*100*100)
+"At threshold: Accuracy = %.1f%%, Epoch = %.0f, Time remaining = %.0f min"%(params["Threshold:Accuracy"]*100.,params["Threshold:Epoch"],params["Threshold:TimeRemaining"]/60.))
def SetMaximumEpoch(self,maxEpoch=None):
self.MaxEpoch = maxEpoch
return
def SetMinimumEpoch(self,minEpoch=10):
self.MinEpoch = minEpoch
return
def SetStopThreshold(self,threshold=3e-4):
self.Threshold = threshold
return
def AddAccuracy(self,accuracy):
self.Accuracy.append(accuracy)
self.Time.append(time.time())
self.Epoch.append(len(self.Epoch)+1)
return
def GetAI(self):
epoch = np.array(self.Epoch,dtype=np.int32)
accur = np.array(self.Accuracy,dtype=np.float32)
deltaE = self.LookBackRatioForP1
p1 = (accur-accur[(epoch*(1-deltaE)).astype(np.int)])/(np.log(epoch)-np.log(epoch*(1-deltaE)))
p1avg = np.array(pd.Series(p1).rolling(window=self.AverageNum).mean())
ai = p1 / epoch
aiavg = p1avg / epoch
atime = np.array(self.Time,dtype=np.float64)
atime -= atime[0]
timeAvg = (atime[-1] - atime[(epoch[-1]*(1-deltaE)).astype(np.int)]) / (epoch[-1] - (epoch[-1]*(1-deltaE)).astype(np.int))
params = {}
Et = p1[-1] / self.Threshold
params["Threshold:TimeRemaining"] = timeAvg * p1[-1] * (1./self.Threshold - 1./aiavg[-1])
params["Threshold:Epoch"] = Et
params["Threshold:Accuracy"] = accur[-1] + p1[-1] * (np.log(Et) - np.log(epoch[-1]))
params["Current:Epoch"] = epoch[-1]
params["Current:Accuracy"] = accur[-1]
params["AIrate"] = aiavg[-1]
params["EpochTime"] = timeAvg
return params
def StopCheck(self):
epoch = len(self.Accuracy)
if self.MaxEpoch and epoch >= self.MaxEpoch: return True
if epoch >= self.MinEpoch:
params = self.GetAI()
if params["AIrate"]<self.Threshold: return True
return False
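# --- Editor's illustrative usage sketch (assumption: not part of the module;
# it also assumes a NumPy version that still provides np.int, which GetAI
# relies on). Feed one validation accuracy per epoch; training stops once the
# fitted improvement rate drops below the threshold or MaxEpoch is reached. ---
if __name__ == "__main__":
    sm = StopManager()
    sm.SetMaximumEpoch(200)
    sm.SetMinimumEpoch(10)
    sm.SetStopThreshold(3e-4)
    for epoch in range(200):
        accuracy = 1.0 - 0.5 * np.exp(-epoch / 30.0)  # toy learning curve
        sm.AddAccuracy(accuracy)
        if sm.StopCheck():
            print(sm.GetInfo())
            break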
| mit |
xavierwu/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
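# Editor's note (background, stated as an assumption about the test's intent):
# the identity checked above is the kernel form of ridge regression, where the
# dual coefficients solve (K + alpha*I) dual_coef = y with K = X X^T, and the
# primal weights are recovered as coef = X^T dual_coef.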
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
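# --- Editor's illustrative sketch (assumption: not part of the original test
# suite); it only runs when this file is executed directly. ---
if __name__ == "__main__":
    # The rescaling identity used in test_ridge_sample_weights: for the squared
    # loss, fitting with sample_weight w equals fitting sqrt(w)-scaled rows.
    rng = np.random.RandomState(0)
    X = rng.randn(6, 5)
    y = rng.randn(6)
    w = 1 + rng.rand(6)
    c1 = ridge_regression(X, y, alpha=1.0, sample_weight=w, solver="cholesky")
    c2 = ridge_regression(X * np.sqrt(w)[:, np.newaxis], y * np.sqrt(w),
                          alpha=1.0, solver="cholesky")
    print(np.allclose(c1, c2))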
| bsd-3-clause |
shaunwbell/FOCI_Analysis | temp/timeseries2grid.py | 1 | 1350 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 11:03:49 2017
@author: bell
"""
import xarray as xa
import pandas as pd
import datetime
from netCDF4 import num2date
"""
filein='/Volumes/WDC_internal/Users/bell/in_and_outbox/Ongoing_Analysis/M2_IntegratedTemp/most_current/1995_2016_htcontent_depthint.cf.nc'
data = xa.open_dataset(filein)
df = data.to_dataframe()
df=df.reset_index().set_index('time')
for month in range(1,13,1):
for day in range(1,32):
temp = [str(month)+'-'+str(day)]
for year in range(1995,2017):
temp = temp + [df.loc[(df.index.day == day) & (df.index.month == month) & (df.index.year == year)].mean()['V00_1900']]
print temp
"""
filein='/Volumes/WDC_internal/Users/bell/ecoraid/2016/Moorings/16bspitae/initial_archive/16bspitae_prawler_2D.nc'
data = xa.open_dataset(filein, decode_cf=False)
datamasked = data.where(data.time < 1e30)
time = datamasked.time.to_pandas()
time = time.fillna(0)
tarray = num2date(time,'hours since 1900-01-01T00:00:00Z')
for i in range(0, tarray.shape[0]):
line = []
for j in range(0, tarray.shape[1]):
if tarray[i,j] != datetime.datetime(1900,1,1):
line = line + [datetime.datetime.strftime(tarray[i,j],'%Y-%m-%d %H:%M:%S')]
else:
line = line + ['']
print ",".join(line)
#print Chlorophyll | mit |
karolciba/playground | cross/solver.py | 1 | 7020 | from collections import defaultdict
import matplotlib.pyplot as plt
from itertools import product
plt.ion()
# node = (dot,N,NW,W,SW,S,SE,E,NE)
O = 0
N = 1
NW = 2
W = 3
SW = 4
S = 5
SE = 6
E = 7
NE = 8
def dot():
return [True,None,None,None,None,None,None,None,None]
def empty():
return [None,None,None,None,None,None,None,None,None]
space = { (0,3): dot(),
(0,4): dot(),
(0,5): dot(),
(0,6): dot(),
(1,3): dot(),
(1,6): dot(),
(2,3): dot(),
(2,6): dot(),
(3,0): dot(),
(3,1): dot(),
(3,2): dot(),
(3,3): dot(),
(3,6): dot(),
(3,7): dot(),
(3,8): dot(),
(3,9): dot(),
(4,0): dot(),
(4,9): dot(),
(5,0): dot(),
(5,9): dot(),
(6,0): dot(),
(6,1): dot(),
(6,2): dot(),
(6,3): dot(),
(6,6): dot(),
(6,7): dot(),
(6,8): dot(),
(6,9): dot(),
(7,3): dot(),
(7,6): dot(),
(8,3): dot(),
(8,6): dot(),
(9,3): dot(),
(9,4): dot(),
(9,5): dot(),
(9,6): dot()}
def copy(space):
cp = { k: v[:] for k,v in space.items() }
return cp
def visualize(space):
plt.clf()
dots = space.keys()
x = [p[0] for p in dots]
y = [p[1] for p in dots]
for key,value in space.items():
for c in value[1:]:
if c:
plt.plot((key[0],c[0]),(key[1],c[1]), 'ro-')
plt.plot(x,y,'o')
def checkset(DF,DT,points,space):
p0 = points[0]
p1 = points[1]
p2 = points[2]
p3 = points[3]
p4 = points[4]
p5 = points[5]
p6 = points[6]
# import pdb; pdb.set_trace()
if p1 not in space or p2 not in space or p3 not in space or p4 not in space or p5 not in space:
return False
if p0 in space:
if space[p0][DF] or space[p1][DT]:
return False
if p6 in space:
if space[p5][DF] or space[p6][DT]:
return False
if space[p1][DF] or space[p2][DT]:
return False
if space[p2][DF] or space[p3][DT]:
return False
if space[p3][DF] or space[p4][DT]:
return False
if space[p4][DF] or space[p5][DT]:
return False
space[p1][DF] = p2
space[p2][DT] = p1
space[p2][DF] = p3
space[p3][DT] = p2
space[p3][DF] = p4
space[p4][DT] = p3
space[p4][DF] = p5
space[p5][DT] = p4
return True
def valid(coord,space):
# can input dot?
# no - not valid
valids = []
if coord in space:
return valids
# E - W
for after in range(0,6):
before = -5 + after
dots = list(zip(range(before + coord[0],after+2 + coord[0]), [coord[1]]*7))
cp = copy(space)
cp[coord] = dot()
if checkset(E,W,dots,cp):
valids.append(cp)
# N - S
for after in range(0,6):
before = -5 + after
dots = list(zip([coord[0]]*7, range(before + coord[1],after+2 + coord[1])))
cp = copy(space)
cp[coord] = dot()
if checkset(N,S,dots,cp):
valids.append(cp)
# NE - SW
for after in range(0,6):
before = -5 + after
dots = list(zip(range(before + coord[0],after+2 + coord[0]),range(before + coord[1],after+2 + coord[1])))
cp = copy(space)
cp[coord] = dot()
if checkset(NE,SW,dots,cp):
valids.append(cp)
# NW - SE
for after in range(0,6):
before = -5 + after
dots = list(zip(range(before + coord[0],after+2 + coord[0]),range(after+2 + coord[1],before + coord[1],-1)))
# print(dots)
cp = copy(space)
cp[coord] = dot()
        if checkset(SE,NW,dots,cp):
valids.append(cp)
# print(valids)
return valids
def avail(space):
dots = space.keys()
neigh = set()
for dot in dots:
neigh.add( (dot[0]-1,dot[1]-1) )
neigh.add( (dot[0]-1,dot[1]) )
neigh.add( (dot[0]-1,dot[1]+1) )
neigh.add( (dot[0],dot[1]-1) )
neigh.add( (dot[0],dot[1]) )
neigh.add( (dot[0],dot[1]+1) )
neigh.add( (dot[0]+1,dot[1]-1) )
neigh.add( (dot[0]+1,dot[1]) )
neigh.add( (dot[0]+1,dot[1]+1) )
candidates = set([n for n in neigh if n not in space])
return candidates
def extend(cand,coord,space):
neigh = set([n for n in cand if n not in space])
    neigh.add( (coord[0]-1,coord[1]-1) )
    neigh.add( (coord[0]-1,coord[1]) )
    neigh.add( (coord[0]-1,coord[1]+1) )
    neigh.add( (coord[0],coord[1]-1) )
    neigh.add( (coord[0],coord[1]) )
    neigh.add( (coord[0],coord[1]+1) )
    neigh.add( (coord[0]+1,coord[1]-1) )
    neigh.add( (coord[0]+1,coord[1]) )
    neigh.add( (coord[0]+1,coord[1]+1) )
candidates = set([n for n in neigh if n not in space])
return candidates
from heapq import *
def prir2(space, depth=0):
queue = []
counter = 0
moves = avail(space)
heappush(queue, (0, counter, space, moves))
total_best = 0
total_space = space
while queue:
depth, _, space, moves = heappop(queue)
options = []
for move in moves:
            for ns in valid(move, space):
                options.append((ns, extend(moves, move, ns)))
for o,m in options:
print("Depth {} best {}".format(depth, total_best), end="\r")
counter += 1
heappush(queue,(depth+1,counter,o,m))
if True or depth > total_best:
total_best = depth
plt.pause(.0001)
visualize(o)
def prir(space, depth=0):
queue = []
counter = 0
heappush(queue, (0, counter, space))
plt.ion()
total_best = 0
total_space = space
while queue:
depth, _, space = heappop(queue)
moves = avail(space)
options = []
for move in moves:
options.extend(valid(move,space))
for o in options:
print("Depth {} best {}".format(depth, total_best), end="\r")
counter += 1
heappush(queue,(depth+1,counter,o))
if True or depth > total_best:
total_best = depth
# plt.pause(.0001)
visualize(o)
total_best = 0
total_s = None
def solve(space, depth=0):
global total_best
moves = avail(space)
options = []
for move in moves:
options.extend(valid(move,space))
best_d = depth
best_s = space
for o in options:
print("Depth {} iter best {} total best {}".format(depth, best_d, total_best), end="\r")
s, d = solve(o, depth+1)
# plt.pause(.0001)
# visualize(best_s)
if d > best_d:
best_d = d
best_s = s
if best_d > total_best:
total_best = best_d
total_s = best_s
visualize(best_s)
plt.pause(.01)
return best_s, best_d
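# --- Editor's illustrative usage sketch (assumption: not part of the original).
# Note: solve() is an exhaustive search over "join five" moves, so it runs
# essentially indefinitely; interrupt it once the plot shows a good board. ---
if __name__ == "__main__":
    best_space, best_depth = solve(copy(space))
    print("best depth found:", best_depth)
    visualize(best_space)
    plt.ioff()
    plt.show()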
| unlicense |
dungvtdev/upsbayescpm | pgmpy/tests/test_estimators/test_ParameterEstimator.py | 6 | 1435 | import unittest
from pandas import DataFrame
from numpy import NaN
from pgmpy.models import BayesianModel
from pgmpy.estimators import ParameterEstimator
class TestParameterEstimator(unittest.TestCase):
def setUp(self):
self.m1 = BayesianModel([('A', 'C'), ('B', 'C'), ('D', 'B')])
self.d1 = DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0], 'D': ['X', 'Y', 'Z']})
self.d2 = DataFrame(data={'A': [0, NaN, 1], 'B': [0, 1, 0], 'C': [1, 1, NaN], 'D': [NaN, 'Y', NaN]})
def test_state_count(self):
e = ParameterEstimator(self.m1, self.d1)
self.assertEqual(e.state_counts('A').values.tolist(), [[2], [1]])
self.assertEqual(e.state_counts('C').values.tolist(),
[[0., 0., 1., 0.], [1., 1., 0., 0.]])
def test_missing_data(self):
e = ParameterEstimator(self.m1, self.d2, state_names={'C': [0, 1]}, complete_samples_only=False)
self.assertEqual(e.state_counts('A', complete_samples_only=True).values.tolist(), [[0], [0]])
self.assertEqual(e.state_counts('A').values.tolist(), [[1], [1]])
self.assertEqual(e.state_counts('C', complete_samples_only=True).values.tolist(),
[[0, 0, 0, 0], [0, 0, 0, 0]])
self.assertEqual(e.state_counts('C').values.tolist(),
[[0, 0, 0, 0], [1, 0, 0, 0]])
def tearDown(self):
del self.m1
del self.d1
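# Editor's addition (assumption: not part of the original test module): allow
# running these tests directly, e.g. ``python test_ParameterEstimator.py``.
if __name__ == '__main__':
    unittest.main()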
| mit |
fzalkow/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
trevorwitter/Stats | stats.py | 1 | 2933 | from pandas import DataFrame, groupby
import pandas as pd
import numpy as np
import scipy.stats
from scipy.stats import linregress
from random import random
import matplotlib.pyplot as plt
from collections import Counter
df = pd.read_excel('/Users/twitter/Desktop/R1_baseline2.xlsx')
frame = DataFrame(df)
#1D stats
def cohort_stats(x):
mean = np.mean(x)
stdev = np.std(x)
max_value = np.max(x)
min_value = np.min(x)
stats = {'mean':mean, 'stdev':stdev, 'max':max_value, 'min':min_value}
return stats
def data_range(x):
return max(x) - min(x)
def histogram(x):
"""basic histogram plot for 1D array"""
y = np.array(x)
rounded = np.rint(y)
subject_counts = Counter(rounded)
xs = range(160)
ys = [subject_counts[z] for z in xs]
plt.bar(xs, ys)
xmin = min(x)
xmax = max(x)
plt.xlim(xmin, xmax)
ymax = (max(ys)*1.15)
plt.ylim(0, ymax)
plt.show()
def Normality(x):
"""determines if distribution is normal"""
w,p = scipy.stats.shapiro(x)
if p > .05:
return "normal"
else:
return "not normal"
def norm_histo(x):
if Normality(x) == "normal":
print "normal distribution"
print cohort_stats(x)
return histogram(x)
else:
print "not normal distribution"
print cohort_stats(x)
return histogram(x)
#2D Stats
def de_mean(x):
"""resulting mean is zero; for use with variance"""
x_bar = np.mean(x)
return [x_i - x_bar for x_i in x]
def sum_of_squares(x):
    """sum of the squared elements of x"""
    return sum([x_i ** 2 for x_i in x])
def variance(x):
"""x must have at least 2 elements"""
n = len(x)
deviations = de_mean(x)
return sum_of_squares(deviations)/(n-1)
def standard_deviation(x):
return math.sqrt(variance(x))
def covariance(x, y):
n = len(x)
return np.dot(de_mean(x), de_mean(y)) / (n-1)
def correlation_plot(x, y):
"""correlation plot with linear regression, slope and R**2 display"""
plt.plot(x, y, linestyle=' ', marker='o', color='b')
slope, intercept, r_value, p_value, std_err = linregress(x, y)
plt.title('%s x %s' % (x.name, y.name))
bestfit = [(i*slope)+intercept for i in x]
plt.plot(x, bestfit, linestyle='--', color='k')
ymin = max(y)
xmax = min(x)+(max(x)*.02)
plt.text(xmax, ymin, r'$Slope = %s,\ R^2=%s$' % (np.round(slope, decimals = 2), np.round(r_value, decimals = 2)))
plt.xlabel('%s' % (x.name))
plt.ylabel('%s' % (y.name))
plt.show()
def correlation_matrix(data):
"""returns a num_columns * num_columns matrix of correlations plots. Incomplete; working on this"""
num_columns = 3
num_plot = (0)
fig, ax= plt.subplots(num_columns, num_columns)
for i in data:
for j in data:
num_plot += 1
ax = fig.add_subplot(num_columns, num_columns, num_plot)
correlation_plot(i, j)
plt.show()
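# --- Editor's illustrative usage sketch with synthetic data (assumption: the
# hard-coded read_excel call at the top of this module succeeds, or is removed,
# since it runs at import time). ---
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    heights = rng.normal(loc=100, scale=10, size=200)
    weights = 0.5 * heights + rng.normal(scale=5, size=200)
    print(cohort_stats(heights))
    correlation_plot(pd.Series(heights, name="height"),
                     pd.Series(weights, name="weight"))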
| mit |
admk/soap | soap/flopoco/actual.py | 1 | 7631 | import itertools
import pickle
import shutil
import tempfile
from soap import logger
from soap.flopoco.common import cd, template_file, flopoco, xilinx
class _RTLGenerator(object):
def __init__(self, expr, var_env, prec, file_name=None, dir=None):
from soap.expression import Expr
self.expr = Expr(expr)
self.var_env = var_env
self.wf = prec
self.we = self.expr.exponent_width(var_env, prec)
self.dir = dir or tempfile.mktemp(suffix='/')
with cd(self.dir):
self.f = file_name or tempfile.mktemp(suffix='.vhdl', dir=dir)
def generate(self):
from akpytemp import Template
ops = set()
in_ports = set()
out_port, ls = self.expr.as_labels()
wires = set()
signals = set()
def wire(op, in1, in2, out):
def wire_name(i):
if i in signals:
return i.signal_name()
if i in in_ports:
return i.port_name()
if i == out_port:
return 'p_out'
for i in [in1, in2, out]:
# a variable represented as a string is a port
if isinstance(i.e, str):
in_ports.add(i)
continue
# a number is a port
try:
float(i.e)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# a range is a port
try:
a, b = i.e
float(a), float(b)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# an expression, need a signal for its output
try:
i.e.op
if i != out_port:
signals.add(i)
except AttributeError:
pass
wires.add((op, wire_name(in1), wire_name(in2), wire_name(out)))
for out, e in ls.items():
try:
op, in1, in2 = e.op, e.a1, e.a2
wire(op, in1, in2, out)
ops.add(e.op)
except AttributeError:
pass
in_ports = [i.port_name() for i in in_ports]
out_port = 'p_out'
signals = [i.signal_name() for i in signals]
logger.debug(in_ports, signals, wires)
Template(path=template_file).save(
path=self.f, directory=self.dir, flopoco=flopoco,
ops=ops, e=self.expr,
we=self.we, wf=self.wf,
in_ports=in_ports, out_port=out_port,
signals=signals, wires=wires)
return self.f
def actual_luts(expr, var_env, prec):
import sh
dir = tempfile.mktemp(suffix='/')
f = _RTLGenerator(expr, var_env, prec, dir=dir).generate()
logger.debug('Synthesising', str(expr), 'with precision', prec, 'in', f)
try:
return xilinx(f, dir=dir)
except (sh.ErrorReturnCode, KeyboardInterrupt):
raise
finally:
shutil.rmtree(dir)
def _para_area(i_n_e_v_p):
import sh
i, n, e, v, p = i_n_e_v_p
try:
real_area, estimated_area = e.real_area(v, p), e.area(v, p).area
logger.info(
'%d/%d, Expr: %s, Prec: %d, Real Area: %d, Estimated Area: %d' %
(i + 1, n, str(e), p, real_area, estimated_area))
return real_area, estimated_area
except sh.ErrorReturnCode:
logger.error('Unable to synthesise', str(e), 'with precision', p)
except Exception as exc:
logger.error('Unknown failure', exc, 'when synthesising', str(e),
'with precision', p)
_pool = None
def pool():
global _pool
if _pool:
return _pool
from multiprocessing import Pool
_pool = Pool()
return _pool
_setup_rc_done = False
def _setup_rc():
global _setup_rc_done
if _setup_rc_done:
return
from matplotlib import rc
rc('font', family='serif', size=24, serif='Times')
rc('text', usetex=True)
_setup_rc_done = True
class AreaEstimateValidator(object):
"""Validates our area model by comparing it against synthesis"""
def __init__(self, expr_set=None, var_env=None, prec_list=None):
self.e = expr_set
self.v = var_env
self.p = prec_list
def scatter_points(self):
try:
return self.points
except AttributeError:
pass
v = self.v
n = len(self.e) * len(self.p)
s = [(i, n, e, v, p)
for i, (e, p) in enumerate(itertools.product(self.e, self.p))]
self.points = pool().imap_unordered(_para_area, s)
self.points = [p for p in self.points if p is not None]
return self.points
def _plot(self):
try:
return self.figure
except AttributeError:
pass
from matplotlib import pyplot, pylab
_setup_rc()
self.figure = pyplot.figure()
plot = self.figure.add_subplot(111)
for ax in [plot.xaxis, plot.yaxis]:
ax.get_major_formatter().set_scientific(True)
ax.get_major_formatter().set_powerlimits((-2, 3))
real_area, estimated_area = zip(*self.scatter_points())
scatter_real_area = [v for i, v in enumerate(real_area) if i % 10 == 0]
scatter_estimated_area = [v for i, v in enumerate(estimated_area)
if i % 10 == 0]
plot.scatter(scatter_real_area, scatter_estimated_area,
marker='.', s=0.5, linewidth=1, color='r')
plot.grid(True, which='both', ls=':')
plot.set_xlabel('Actual Area (Number of LUTs)')
plot.set_ylabel('Estimated Area (Number of LUTs)')
lim = max(plot.get_xlim())
reg_fit = pylab.polyfit(real_area, estimated_area, 1)
logger.info(reg_fit)
reg_func = pylab.poly1d(reg_fit)
plot.plot([0, lim], reg_func([0, lim]), color='k')
plot.plot([0, lim], [0, lim], linestyle=':', color='k')
plot.set_xlim(0, lim)
plot.set_ylim(0, lim)
return self.figure
def show_plot(self):
from matplotlib import pyplot
pyplot.show(self._plot())
def save_plot(self, *args, **kwargs):
self._plot().savefig(*args, bbox_inches='tight', **kwargs)
@classmethod
def load_points(cls, f):
a = cls()
with open(f, 'rb') as f:
a.points = pickle.load(f)
return a
def save_points(self, f):
p = self.scatter_points()
with open(f, 'wb') as f:
pickle.dump(p, f)
def actual_vs_estimate():
from soap.transformer.utils import greedy_trace
from soap.flopoco.common import wf_range
logger.set_context(level=logger.levels.info)
try:
a = AreaEstimateValidator.load_points('area.pkl')
except FileNotFoundError:
exprs = [
"""(a + a + b) * (a + b + b) * (b + b + c) *
(b + c + c) * (c + c + a) * (c + a + a)""",
'(1 + b + c) * (a + 1 + b) * (a + b + 1)',
'(a + 1) * (b + 1) * (c + 1)',
'a + b + c',
]
v = {
'a': ['1', '2'],
'b': ['10', '20'],
'c': ['100', '200'],
}
p = list(reversed(wf_range))
s = []
for e in exprs:
s += greedy_trace(e, v, depth=3)
a = AreaEstimateValidator(s, v, p)
a.save_points('area.pkl')
a.save_plot('area.pdf')
a.show_plot()
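# actual_vs_estimate() is the module's entry point for reproducing the validation figure:
# it reuses cached synthesis results from 'area.pkl' when present, otherwise it synthesises
# the traced expressions, then writes the scatter plot to 'area.pdf' and shows it.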
| mit |
abhishekgahlot/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 30 | 7560 | """
Test the fastica algorithm.
"""
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
"""
Test gram schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
"""Test FastICA.fit_transform"""
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, 10]]:
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components, 10))
assert_equal(Xt.shape, (100, n_components))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
"""Test FastICA.inverse_transform"""
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
gojira/tensorflow | tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 40 | 32402 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)],
labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):],
labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(language, dimension=1)
]
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| apache-2.0 |
gVallverdu/pymatgen | pymatgen/analysis/diffraction/tests/test_tem.py | 2 | 11775 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Unit tests for TEM calculator.
"""
import unittest
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.diffraction.tem import TEMCalculator
from pymatgen.util.testing import PymatgenTest
import numpy as np
import pandas as pd
import plotly.graph_objs as go
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.201"
__maintainer__ = "Jason Liang"
__email__ = "[email protected], [email protected]"
__date__ = "2/20/20"
class TEMCalculatorTest(PymatgenTest):
def test_wavelength_rel(self):
# Tests that the relativistic wavelength formula (for 200kv electron beam) is correct
c = TEMCalculator()
self.assertAlmostEqual(c.wavelength_rel(), 0.0251, places=3)
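        # For reference, the asserted value follows from the relativistic de Broglie relation
        # lambda = h / sqrt(2*m_e*e*V*(1 + e*V/(2*m_e*c**2))); for a 200 kV beam this gives
        # roughly 0.0251 angstrom, which is what wavelength_rel() is expected to return.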
def test_generate_points(self):
# Tests that 3d points are properly generated
c = TEMCalculator()
actual = c.generate_points(-1, 1)
expected = np.array([[-1, -1, -1],
[-1, -1, 0],
[-1, -1, 1],
[0, -1, -1],
[0, -1, 0],
[0, -1, 1],
[1, -1, -1],
[1, -1, 0],
[1, -1, 1],
[-1, 0, -1],
[-1, 0, 0],
[-1, 0, 1],
[0, 0, -1],
[0, 0, 0],
[0, 0, 1],
[1, 0, -1],
[1, 0, 0],
[1, 0, 1],
[-1, 1, -1],
[-1, 1, 0],
[-1, 1, 1],
[0, 1, -1],
[0, 1, 0],
[0, 1, 1],
[1, 1, -1],
[1, 1, 0],
[1, 1, 1]])
self.assertArrayEqual(expected, actual)
def test_zone_axis_filter(self):
# Tests that the appropriate Laue-Zoned points are returned
c = TEMCalculator()
empty_points = np.asarray([])
self.assertEqual(c.zone_axis_filter(empty_points), [])
points = np.asarray([[-1, -1, -1]])
self.assertEqual(c.zone_axis_filter(points), [])
laue_1 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1]])
self.assertEqual(c.zone_axis_filter(laue_1, 1), [(0, 0, 1)])
def test_get_interplanar_spacings(self):
# Tests that the appropriate interplacing spacing is returned
c = TEMCalculator()
point = [(3, 9, 0)]
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
tet = self.get_structure("Li10GeP2S12")
hexa = self.get_structure("Graphite")
ortho = self.get_structure("K2O2")
mono = self.get_structure("Li3V2(PO4)3")
spacings_cubic = c.get_interplanar_spacings(cubic, point)
spacings_tet = c.get_interplanar_spacings(tet, point)
spacings_hexa = c.get_interplanar_spacings(hexa, point)
spacings_ortho = c.get_interplanar_spacings(ortho, point)
spacings_mono = c.get_interplanar_spacings(mono, point)
for p in point:
self.assertAlmostEqual(spacings_cubic[p], 0.4436675557216236)
self.assertAlmostEqual(spacings_tet[p], 0.9164354445646701)
self.assertAlmostEqual(spacings_hexa[p], 0.19775826179547752)
self.assertAlmostEqual(spacings_ortho[p], 0.5072617738916)
self.assertAlmostEqual(spacings_mono[p], 0.84450786041677972)
def test_bragg_angles(self):
# Tests that the appropriate bragg angle is returned. Testing formula with values of x-ray diffraction in
# materials project.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(1, 1, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
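        # Bragg's law: n*lambda = 2*d*sin(theta); with n = 1 and lambda = 1.5406 angstrom
        # (Cu K-alpha, as used for the Materials Project XRD patterns) the expected angle is
        # arcsin(lambda / (2*d)) for the (1, 1, 0) interplanar spacing computed above.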
bragg_angles_val = np.arcsin(1.5406 / (2 * spacings[point[0]]))
self.assertAlmostEqual(bragg_angles_val, 0.262, places=3)
def test_get_s2(self):
# Tests that the appropriate s2 factor is returned.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(-10, 3, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
angles = c.bragg_angles(spacings)
s2 = c.get_s2(angles)
for p in s2:
self.assertAlmostEqual(s2[p], 1.5381852947115047)
def test_x_ray_factors(self):
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(-10, 3, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
angles = c.bragg_angles(spacings)
x_ray = c.x_ray_factors(cubic, angles)
self.assertAlmostEqual(x_ray['Cs'][(-10, 3, 0)], 14.42250869579648)
self.assertAlmostEqual(x_ray['Cl'][(-10, 3, 0)], 2.7804915737999103)
def test_electron_scattering_factors(self):
# Test the electron atomic scattering factor, values approximate with
# international table of crystallography volume C. Rounding error when converting hkl to sin(theta)/lambda.
# Error increases as sin(theta)/lambda is smaller.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(2, 1, 3)]
point_nacl = [(4, 2, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
spacings_nacl = c.get_interplanar_spacings(nacl, point_nacl)
angles = c.bragg_angles(spacings)
angles_nacl = c.bragg_angles(spacings_nacl)
elscatt = c.electron_scattering_factors(cubic, angles)
elscatt_nacl = c.electron_scattering_factors(nacl, angles_nacl)
self.assertAlmostEqual(elscatt['Cs'][(2, 1, 3)], 2.890, places=1)
self.assertAlmostEqual(elscatt['Cl'][(2, 1, 3)], 1.138, places=1)
self.assertAlmostEqual(elscatt_nacl['Na'][(4, 2, 0)], 0.852, places=1)
self.assertAlmostEqual(elscatt_nacl['Cl'][(4, 2, 0)], 1.372, places=1)
def test_cell_scattering_factors(self):
# Test that fcc structure gives 0 intensity for mixed even, odd hkl.
c = TEMCalculator()
nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(2, 1, 0)]
spacings = c.get_interplanar_spacings(nacl, point)
angles = c.bragg_angles(spacings)
cellscatt = c.cell_scattering_factors(nacl, angles)
self.assertAlmostEqual(cellscatt[(2, 1, 0)], 0)
def test_cell_intensity(self):
# Test that bcc structure gives lower intensity for h + k + l != even.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(2, 1, 0)]
point2 = [(2, 2, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
spacings2 = c.get_interplanar_spacings(cubic, point2)
angles = c.bragg_angles(spacings)
angles2 = c.bragg_angles(spacings2)
cellint = c.cell_intensity(cubic, angles)
cellint2 = c.cell_intensity(cubic, angles2)
self.assertGreater(cellint2[(2, 2, 0)], cellint[(2, 1, 0)])
def test_normalized_cell_intensity(self):
# Test that the method correctly normalizes a value.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
point = [(2, 0, 0)]
spacings = c.get_interplanar_spacings(cubic, point)
angles = c.bragg_angles(spacings)
cellint = c.normalized_cell_intensity(cubic, angles)
self.assertAlmostEqual(cellint[(2, 0, 0)], 1)
def test_is_parallel(self):
c = TEMCalculator()
structure = self.get_structure("Si")
self.assertTrue(c.is_parallel(structure, (1, 0, 0), (3, 0, 0)))
self.assertFalse(c.is_parallel(structure, (1, 0, 0), (3, 0, 1)))
def test_get_first_point(self):
c = TEMCalculator()
latt = Lattice.cubic(4.209)
points = c.generate_points(-2, 2)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
first_pt = c.get_first_point(cubic, points)
self.assertTrue(4.209 in first_pt.values())
def test_interplanar_angle(self):
# test interplanar angles. Reference values from KW Andrews,
# Interpretation of Electron Diffraction pp70-90.
c = TEMCalculator()
latt = Lattice.cubic(4.209)
cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
phi = c.get_interplanar_angle(cubic, (0, 0, -1), (0, -1, 0))
self.assertAlmostEqual(90, phi, places=1)
tet = self.get_structure("Li10GeP2S12")
phi = c.get_interplanar_angle(tet, (0, 0, 1), (1, 0, 3))
self.assertAlmostEqual(25.796, phi, places=1)
latt = Lattice.hexagonal(2, 4)
        hexa = Structure(latt, ["Ab"], [[0, 0, 0]])
        phi = c.get_interplanar_angle(hexa, (0, 0, 1), (1, 0, 6))
self.assertAlmostEqual(21.052, phi, places=1)
def test_get_plot_coeffs(self):
# Test if x * p1 + y * p2 yields p3.
c = TEMCalculator()
coeffs = c.get_plot_coeffs((1, 1, 0), (1, -1, 0), (2, 0, 0))
self.assertArrayAlmostEqual(np.array([1., 1.]), coeffs)
def test_get_positions(self):
c = TEMCalculator()
points = c.generate_points(-2, 2)
structure = self.get_structure("Si")
positions = c.get_positions(structure, points)
self.assertArrayEqual([0, 0], positions[(0, 0, 0)])
# Test silicon diffraction data spot rough positions:
# see https://www.doitpoms.ac.uk/tlplib/diffraction-patterns/printall.php
self.assertArrayAlmostEqual([1, 0], positions[(-1, 0, 0)], 0)
def test_TEM_dots(self):
# All dependencies in TEM_dots method are tested. Only make sure each object created is
# the class desired.
c = TEMCalculator()
points = c.generate_points(-2, 2)
structure = self.get_structure("Si")
dots = c.tem_dots(structure, points)
self.assertTrue(all([isinstance(x, tuple) for x in dots]))
def test_get_pattern(self):
# All dependencies in get_pattern method are tested.
# Only make sure result is a pd dataframe.
c = TEMCalculator()
structure = self.get_structure("Si")
self.assertTrue(isinstance(c.get_pattern(structure), pd.DataFrame))
def test_get_plot_2d(self):
c = TEMCalculator()
structure = self.get_structure("Si")
self.assertTrue(isinstance(c.get_plot_2d(structure), go.Figure))
def test_get_plot_2d_concise(self):
c = TEMCalculator()
structure = self.get_structure("Si")
fig = c.get_plot_2d_concise(structure)
width = fig.layout.width
height = fig.layout.height
self.assertTrue(width == 121 and height == 121)
if __name__ == '__main__':
unittest.main()
| mit |
tdhopper/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
rhyolight/nupic.research | projects/union_path_integration/plot_aggregate_narrowing.py | 3 | 3887 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot location module representations during narrowing."""
import argparse
from collections import defaultdict
import json
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def aggregateChart(inFilename, outFilename, objectCounts, ylim):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
markers = ["D", "v", "o", "^"]
markersizes = [3, 4, 4, 4]
plt.figure(figsize=(3.25, 2.5), tight_layout = {"pad": 0})
cellsPerModule = 100
numSteps = 9
resultsByNumObjects = defaultdict(list)
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
numObjects = exp[0]["numObjects"]
timestepsByObject = exp[1]["locationLayerTimelineByObject"].values()
meanDensityByTimestep = [
np.mean([len(module["activeCells"])
for modules in timestepByObject
for module in modules]) / float(cellsPerModule)
for timestepByObject in zip(*timestepsByObject)
]
resultsByNumObjects[numObjects].append(meanDensityByTimestep)
percentiles = [5, 50, 95]
for numObjects, marker, markersize in zip(objectCounts, markers, markersizes):
trials = resultsByNumObjects[numObjects]
x = []
y = []
errBelow = []
errAbove = []
for step, densityByTrial in zip(xrange(numSteps), zip(*trials)):
x.append(step + 1)
p1, p2, p3 = np.percentile(densityByTrial, percentiles)
y.append(p2)
errBelow.append(p2 - p1)
errAbove.append(p3 - p2)
plt.errorbar(x, y, yerr=[errBelow, errAbove], fmt="{}-".format(marker),
label="{} learned objects".format(numObjects), capsize=2,
markersize=markersize)
plt.xlabel("Number of Sensations")
plt.ylabel("Mean Cell Activation Density")
plt.ylim(ylim)
# Remove the errorbars from the legend.
handles, labels = plt.gca().get_legend_handles_labels()
handles = [h[0] for h in handles]
# If there's any opacity, when we export a copy of this from Illustrator, it
# creates a PDF that isn't compatible with Word.
framealpha = 1.0
plt.legend(handles, labels, framealpha=framealpha)
filename = os.path.join(CHART_DIR, outFilename)
print "Saving", filename
plt.savefig(filename)
if __name__ == "__main__":
plt.rc("font",**{"family": "sans-serif",
"sans-serif": ["Arial"],
"size": 8})
parser = argparse.ArgumentParser()
parser.add_argument("--inFile", type=str, required=True)
parser.add_argument("--outFile", type=str, required=True)
parser.add_argument("--objectCounts", type=int, nargs="+", default=[50, 75, 100, 125])
parser.add_argument("--ylim", type=float, nargs=2, default=(-0.05, 1.05))
args = parser.parse_args()
aggregateChart(args.inFile, args.outFile, args.objectCounts, args.ylim)
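# Example invocation (input/output file names are placeholders):
#   python plot_aggregate_narrowing.py --inFile results/narrowing.json \
#     --outFile aggregate_narrowing.pdf --objectCounts 50 75 100 125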
| gpl-3.0 |
sniemi/SamPy | astronomy/reduceACSpol.py | 1 | 29000 | """
Reduces ACS WFC polarimetry data.
:requires: astrodither / astrodrizzle
:requires: PyRAF
:requires: IRAF
:requires: NumPy
:requires: SciPy
:requires: matplotlib
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 0.6
"""
import matplotlib
matplotlib.use('PDF')
import os, glob, shutil, datetime
from optparse import OptionParser
from itertools import groupby, izip, count
from time import time
import numpy as np
import pyfits as pf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import NullFormatter
from scipy.stats import gaussian_kde
from scipy import ndimage
from pytools import asnutil
import pyraf
from pyraf import iraf
from iraf import stsdas, hst_calib, acs, calacs, images, immatch, sregister, blkavg
import acstools
try:
#different naming convention in linux and macosx
import drizzlepac as a
except:
import astrodither as a
try:
#different naming convention in linux and macosx
from drizzlepac import astrodrizzle, tweakreg, pixtopix
except:
from astrodither import astrodrizzle, tweakreg, pixtopix
class sourceFinding():
"""
This class provides methods for source finding.
"""
def __init__(self, image, **kwargs):
"""
Init.
:param image: 2D image array
:type image: numpy.ndarray
:param kwargs: additional keyword arguments
:type kwargs: dictionary
"""
self.image = image
#set default parameter values and then update using kwargs
self.settings = dict(above_background=10.0,
clean_size_min=9,
clean_size_max=110,
sigma=1.5,
disk_struct=3,
output='objects.txt')
self.settings.update(kwargs)
def _diskStructure(self, n):
"""
"""
struct = np.zeros((2 * n + 1, 2 * n + 1))
x, y = np.indices((2 * n + 1, 2 * n + 1))
mask = (x - n) ** 2 + (y - n) ** 2 <= n ** 2
struct[mask] = 1
return struct.astype(np.bool)
def find(self):
"""
Find all pixels above the median pixel after smoothing with a Gaussian filter.
:Note: maybe one should use mode instead of median
"""
#smooth the image
img = ndimage.gaussian_filter(self.image, sigma=self.settings['sigma'])
#find pixels above the median
msk = self.image > np.median(img)
#get background image and calculate statistics
backgrd = self.image[~msk]
std = np.std(backgrd).item() #items required if image was memmap'ed by pyfits
mean = np.mean(backgrd[backgrd > 0.0]).item() #items required if image was memmap'ed by pyfits
rms = np.sqrt(std ** 2 + mean ** 2)
print 'Background: average={0:.4f} and rms={1:.4f}'.format(mean, rms)
#find objects above the background
self.mask = ndimage.median_filter(self.image, self.settings['sigma']) > rms * self.settings[
'above_background'] + mean
#mask_pix = im > rms * above_background + mean
#mask = (mask + mask_pix) >= 1
#get labels
self.label_im, self.nb_labels = ndimage.label(self.mask)
print 'Finished the initial run and found {0:d} objects...'.format(self.nb_labels)
return self.mask, self.label_im, self.nb_labels
def getContours(self):
"""
Derive contours using the diskStructure function.
"""
if not hasattr(self, 'mask'):
self.find()
self.opened = ndimage.binary_opening(self.mask,
structure=self._diskStructure(self.settings['disk_struct']))
return self.opened
def getSizes(self):
"""
Derives sizes for each object.
"""
if not hasattr(self, 'label_im'):
self.find()
self.sizes = np.asarray(ndimage.sum(self.mask, self.label_im, range(self.nb_labels + 1)))
return self.sizes
def getFluxes(self):
"""
Derive fluxes or counts.
"""
if not hasattr(self, 'label_im'):
self.find()
self.fluxes = np.asarray(ndimage.sum(self.image, self.label_im, range(1, self.nb_labels + 1)))
return self.fluxes
def cleanSample(self):
"""
Cleans up small connected components and large structures.
"""
if not hasattr(self, 'sizes'):
self.getSizes()
mask_size = (self.sizes < self.settings['clean_size_min']) | (self.sizes > self.settings['clean_size_max'])
remove_pixel = mask_size[self.label_im]
self.label_im[remove_pixel] = 0
labels = np.unique(self.label_im)
self.label_clean = np.searchsorted(labels, self.label_im)
def getCenterOfMass(self):
"""
        Finds the center-of-mass for all objects using the scipy.ndimage.center_of_mass method.
:return: xposition, yposition, center-of-masses
:rtype: list
"""
if not hasattr(self, 'label_clean'):
self.cleanSample()
self.cms = ndimage.center_of_mass(self.image,
labels=self.label_clean,
index=np.unique(self.label_clean))
self.xcms = [c[1] for c in self.cms]
self.ycms = [c[0] for c in self.cms]
print 'After cleaning found {0:d} objects'.format(len(self.xcms))
return self.xcms, self.ycms, self.cms
def plot(self):
"""
Generates a diagnostic plot.
:return: None
"""
if not hasattr(self, 'opened'):
self.getContours()
if not hasattr(self, 'xcms'):
self.getCenterOfMass()
plt.figure(1, figsize=(18, 8))
s1 = plt.subplot(131)
s1.imshow(np.log10(np.sqrt(self.image)), interpolation=None, origin='lower')
s1.plot(self.xcms, self.ycms, 'x', ms=4)
s1.contour(self.opened, [0.2], c='b', linewidths=1.2, linestyles='dotted')
s1.axis('off')
s1.set_title('log10(sqrt(IMAGE))')
s2 = plt.subplot(132)
s2.imshow(self.mask, cmap=plt.cm.gray, interpolation=None, origin='lower')
s2.axis('off')
s2.set_title('Object Mask')
s3 = plt.subplot(133)
s3.imshow(self.label_clean, cmap=plt.cm.spectral, interpolation=None, origin='lower')
s3.axis('off')
s3.set_title('Cleaned Object Mask')
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=1, bottom=0, left=0, right=1)
plt.savefig('SourceExtraction.pdf')
plt.close()
def generateOutput(self):
"""
Outputs the found positions to an ascii and a DS9 reg file.
:return: None
"""
if not hasattr(self, 'xcms'):
self.getCenterOfMass()
fh = open(self.settings['output'], 'w')
rg = open(self.settings['output'].split('.')[0]+'.reg', 'w')
fh.write('#X coordinate in pixels [starts from 1]\n')
fh.write('#Y coordinate in pixels [starts from 1]\n')
rg.write('#File written on {0:>s}\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
for x, y in zip(self.xcms, self.ycms):
fh.write('%10.3f %10.3f\n' % (x + 1, y + 1))
rg.write('circle({0:.3f},{1:.3f},5)\n'.format(x + 1, y + 1))
fh.close()
rg.close()
def runAll(self):
"""
Performs all steps of source finding at one go.
:return: source finding results such as positions, sizes, fluxes, etc.
:rtype: dictionary
"""
self.find()
self.getContours()
self.getSizes()
self.getFluxes()
self.cleanSample()
self.getCenterOfMass()
self.plot()
self.generateOutput()
results = dict(xcms=self.xcms, ycms=self.ycms, cms=self.cms,
sizes=self.sizes, fluxes=self.fluxes)
return results
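# Example use of sourceFinding, mirroring how findImprovedAlignment below uses
# it (illustrative sketch only; the input file name is just a placeholder):
#
#   data = pf.open('POL0V_drz.fits')[1].data
#   source = sourceFinding(data, sigma=2.0, above_background=8.0)
#   results = source.runAll()
#   print 'found %i objects' % len(results['xcms'])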
class reduceACSWFCpoli():
"""
This class provides methods for reducing ACS WFC polarimetry data.
Uses astrodrizzle to combine images.
"""
def __init__(self, input, **kwargs):
"""
Init.
:param input: FITS association
:type input: string
:param kwargs: additional keyword arguments
:type kwargs: dictionary
"""
self.input = input
self.settings = dict(asndir='asn', rawdir='raw',
jref='/grp/hst/cdbs/jref/',
mtab='/grp/hst/cdbs/mtab/',
sourceImage='POL0V_drz.fits',
sigma=5.0)
self.settings.update(kwargs)
#add env variables to both system and IRAF
os.putenv('jref', self.settings['jref'])
os.putenv('mtab', self.settings['mtab'])
iraf.set(jref=self.settings['jref'])
iraf.set(mtab=self.settings['mtab'])
#IRAF has funny True and False
self.yes = iraf.yes
self.no = iraf.no
def createAssociations(self):
"""
Finds raw data and generates proper FITS associations for each POL filter.
        Groups all the data based on the FILTER2 (POL filter) header keyword.
"""
orig = os.getcwd()
try:
os.mkdir(self.settings['asndir'])
except:
for d in glob.glob('./' + self.settings['asndir'] + '/*.*'):
os.remove(d)
#find all raw files
os.chdir(os.getcwd() + '/' + self.settings['rawdir'] + '/')
raws = glob.glob('*_raw.fits')
out = open('../rawFiles.txt', 'w')
out.write('#File written on {0:>s}\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
out.write('#file filter1 filter2 PA_V3\n')
data = {}
for raw in raws:
hdr = pf.open(raw)[0].header
print raw, hdr['FILTER1'], hdr['FILTER2'], hdr['PA_V3']
if 'POL' in hdr['FILTER1'] or 'POL' in hdr['FILTER2']:
out.write('%s %s %s %s\n' % (raw, hdr['FILTER1'], hdr['FILTER2'], hdr['PA_V3']))
data[raw] = (hdr['FILTER1'], hdr['FILTER2'], hdr['PA_V3'])
else:
print 'skipping line, no POL filter data'
out.close()
#group data by FILTER1
newass = {}
f2d = [(a, data[a][1]) for a in data]
f2d = sorted(f2d, key=lambda x: x[1])
for key, group in groupby(f2d, lambda x: x[1]):
tmp = []
print '\nnew group found:'
for member in group:
print member
tmp.append(member[0])
newass[key] = tmp
#create new associations
asns = []
awd = '../' + self.settings['asndir'] + '/'
print '\n\nCreate new associations to ./%s/' % self.settings['asndir']
for key, value in newass.iteritems():
asnt = asnutil.ASNTable(value, output=key)
asnt.create()
asnt.write()
shutil.move(key + '_asn.fits', awd)
asns.append(key + '_asn.fits')
#check the new associations
for asn in asns:
data = pf.open('../' + self.settings['asndir'] + '/' + asn)[1].data
print asn
for row in data:
print row
os.chdir(orig)
self.associations = asns
if self.input is None:
self.input = [x for x in self.associations if 'POL' in x]
else:
self.input = glob.glob(self.input)
def copyRaws(self):
"""
Copy all _raw, _spt, and _asn files to a temporary working directory.
:Note: One should run either copyRaws or copyFLTs but usually not both!
"""
#make a new dir
path = 'tmp'
try:
os.mkdir(path)
except:
for d in glob.glob('./%s/*.*' % path):
os.remove(d)
for fle in glob.glob('./raw/*_raw.fits'):
shutil.copy(fle, path)
for fle in glob.glob('./support/*_spt.fits'):
shutil.copy(fle, path)
for fle in glob.glob('./asn/*_asn.fits'):
shutil.copy(fle, path)
#change the current working directory to tmp
os.chdir(os.getcwd() + '/' + path)
iraf.chdir(os.getcwd())
def copyFLTs(self):
"""
Copy all _flt, _spt, and _asn files to a temporary working directory.
:Note: One should run either copyRaws or copyFLTs but usually not both!
"""
#make a new dir
path = 'tmp'
try:
os.mkdir(path)
except:
for d in glob.glob('./%s/*.*' % path):
os.remove(d)
for fle in glob.glob('./opus/*_flt.fits'):
shutil.copy(fle, path)
for fle in glob.glob('./support/*_spt.fits'):
shutil.copy(fle, path)
for fle in glob.glob('./asn/*_asn.fits'):
shutil.copy(fle, path)
#change the current working directory to tmp
os.chdir(os.getcwd() + '/' + path)
iraf.chdir(os.getcwd())
def omitPHOTCORR(self):
"""
Sets PHOTCORR keyword to OMIT to prevent crashing.
:note: find a proper fix for this.
:note: Change from iraf to pyfits.
"""
for raw in glob.glob('*_raw.fits'):
iraf.hedit(images=raw + '[0]', fields='PHOTCORR', value='OMIT',
add=self.no, addonly=self.no, delete=self.no,
verify=self.no, show=self.yes, update=self.yes)
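    # A pyfits-based sketch of the same keyword update, as suggested by the
    # note above (illustrative only; the pipeline still uses the IRAF call):
    #
    #   for raw in glob.glob('*_raw.fits'):
    #       fh = pf.open(raw, mode='update')
    #       fh[0].header['PHOTCORR'] = 'OMIT'
    #       fh.close()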
def runCalACS(self):
"""
Calls calACS and processes all given files or associations.
"""
for f in self.input:
calacs.run(input=f)
#remove the raw files
for f in glob.glob('*_raw.fits'):
os.remove(f)
def destripeFLT(self):
"""
Uses the acs_destripe routine from the acstools to remove the bias striping.
Renames the original FLT files as _flt_orig.fits.
The destriped files are called as _flt_destripe.fits.
"""
acstools.acs_destripe.clean('*_flt.fits',
'destripe',
clobber=False,
maxiter=20,
sigrej=2.0)
for f in glob.glob('*_flt.fits'):
shutil.move(f, f.replace('_flt.fits', '_flt_orig.fits'))
for f in glob.glob('*_flt_destripe.fits'):
            shutil.copy(f, f.replace('_flt_destripe.fits', '_flt.fits'))
def destripeFLTSMN(self):
"""
        Uses an algorithm developed by SMN to destripe the ACS FLT frames.
        This is a very simple method: the median of each row is first
        subtracted from that row, after which the median of the full frame
        is added back to every row so that the sky/background level is not
        removed. Pixels that are known to be bad are masked using the DQ
        array, and any good pixel above a sigma threshold is also excluded,
        so that bright objects and cosmic rays do not bias the row medians.
        Such masking is not extremely accurate, but because the algorithm
        works with medians it should suffice.
"""
nullfmt = NullFormatter()
for input in glob.glob('*_flt.fits'):
shutil.copy(input, input.replace('_flt.fits', '_flt_orig.fits'))
inp = input.replace('.fits', '')
fh = pf.open(input, mode='update')
data = fh[1].data
org = data.copy()
dqarr = fh[3].data
medians = []
for i, l, dq in izip(count(), data, dqarr):
msk = ~(dq > 0)
d = l[msk]
#mask additionally everything above x sigma
sig = np.median(d) + self.settings['sigma'] * np.std(d)
msk2 = d < sig
median = np.median(d[msk2])
if ~np.isnan(median):
data[i] -= median
medians.append(median)
else:
print 'Will not remove nan median on line %i' % i
medians = np.asarray(medians)
#add back the background
md = org[~(dqarr > 0)]
background = np.median(md[md < (np.median(md) + self.settings['sigma'] * np.std(md))])
data += background
fh.close()
#generate a ratio plot
plt.figure()
plt.title(inp.replace('_','\_'))
ims = plt.imshow(data / org, origin='lower', vmin=0.98, vmax=1.02)
cb = plt.colorbar(ims)
cb.set_label('Destriped / Original')
plt.savefig(inp + 'ratio.pdf')
plt.close()
#calculate Gaussian KDE and evaluate it
est2 = []
vals = medians - background
kde = gaussian_kde(vals)
for x in np.arange(np.int(np.min(vals)), np.int(np.max(vals)), 0.1):
y = kde.evaluate(x)[0]
est2.append([x, y])
est2 = np.asarray(est2)
#generate a plot showing the distribution of median subtractions
plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])
gs.update(wspace=0.0, hspace=0.0, top=0.96, bottom=0.07)
axScatter = plt.subplot(gs[0])
axHist = plt.subplot(gs[1])
axScatter.set_title(inp.replace('_','\_'))
axScatter.plot(medians - background, np.arange(len(medians)), 'bo')
axScatter.xaxis.set_major_formatter(nullfmt)
n, bins, patches = axHist.hist(medians - background, bins=35, normed=True)
axHist.plot(est2[:, 0], est2[:, 1], 'r-', label='Gaussian KDE')
axHist.set_xlabel('Medians - Background')
axScatter.set_ylabel('Row')
axScatter.set_ylim(-1, 2046)
axHist.legend()
plt.savefig(inp + 'dist.pdf')
plt.close()
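    # Minimal sketch of the row-median destriping idea described in the
    # docstring above, for a single SCI array ``data`` with DQ array ``dq``
    # (illustrative only; it omits the sigma clipping the method applies):
    #
    #   good = (dq == 0)
    #   row_medians = np.array([np.median(row[g]) if g.any() else 0.0
    #                           for row, g in zip(data, good)])
    #   destriped = data - row_medians[:, np.newaxis]
    #   destriped += np.median(data[good])  # restore the sky/background level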
def updateHeader(self):
"""
Calls astrodrizzle's updatenpol to update the headers of the FLT files.
"""
a.updatenpol.update('*_flt.fits', self.settings['jref'])
def initialProcessing(self):
"""
        Does the initial processing: runs astrodrizzle separately for each
        POL filter association to produce the first-pass drizzled images.
"""
#run astrodrizzle separately for each POL
kwargs = dict(final_pixfrac=1.0, final_fillval=1.0, preserve=False,
updatewcs=True, final_wcs=False, build=True, skysub=False)
for f in self.input:
astrodrizzle.AstroDrizzle(input=f, mdriztab=False, editpars=False, **kwargs)
def findImprovedAlignment(self):
"""
Tries to find stars to be used for improved alignment.
Generates coordinate lists for each POL file and for every _flt file.
Maps all the positions to uncorrected/distorted frame using pixtopix transformation.
Finally, runs the tweakreg to find improved alignment and updates the WCS in the
headers.
"""
#find some good stars
source = sourceFinding(pf.open(self.settings['sourceImage'])[1].data)
results = source.runAll()
#find improved locations for each star
acc = []
for x, y in zip(results['xcms'], results['ycms']):
acc.append(iraf.imcntr('POL*_drz.fits[1]', x_init=x, y_init=y, cboxsize=45, Stdout=1))
o = open('tmp.txt', 'w')
o.write('#File written on {0:>s}\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
for line in acc:
for l in line:
o.write(l.replace('[', '').replace(']', '') + '\n')
o.close()
data = open('tmp.txt').readlines()
pol0 = open('POL0coords.txt', 'w')
pol60 = open('POL60coords.txt', 'w')
pol120 = open('POL120coords.txt', 'w')
pol0r = open('POL0coords.reg', 'w')
pol60r = open('POL60coords.reg', 'w')
pol120r = open('POL120coords.reg', 'w')
for line in data:
tmp = line.split(':')
x = tmp[1].replace('y', '').strip()
y = tmp[2].strip()
out = '%s %s\n' % (x, y)
reg = 'image;circle(%s,%s,5)\n' % ( x, y)
if 'POL0' in line:
pol0.write(out)
pol0r.write(reg)
elif 'POL60' in line:
pol60.write(out)
pol60r.write(reg)
elif 'POL120' in line:
pol120.write(out)
pol120r.write(reg)
else:
print 'Skipping line:', line
pol0.close()
pol60.close()
pol120.close()
pol0r.close()
pol60r.close()
pol120r.close()
data = open('../rawFiles.txt').readlines()
pol0 = [line.split()[0].split('_raw')[0] + '_flt.fits' for line in data if 'POL0' in line.split()[2]]
pol60 = [line.split()[0].split('_raw')[0] + '_flt.fits' for line in data if 'POL60' in line.split()[2]]
pol120 = [line.split()[0].split('_raw')[0] + '_flt.fits' for line in data if 'POL120' in line.split()[2]]
for file in pol0:
x, y = pixtopix.tran(file + "[1]", 'POL0V_drz.fits[1]', 'backward',
coords='POL0coords.txt', output=file.replace('.fits', '') + '.coords',
verbose=False)
for file in pol60:
x, y = pixtopix.tran(file + "[1]", 'POL60V_drz.fits[1]', 'backward',
coords='POL60coords.txt', output=file.replace('.fits', '') + '.coords',
verbose=False)
for file in pol120:
x, y = pixtopix.tran(file + "[1]", 'POL120V_drz.fits[1]', 'backward',
coords='POL120coords.txt', output=file.replace('.fits', '') + '.coords',
verbose=False)
del x
del y
coords = glob.glob('*_flt.coords')
#remove comment lines from each coords file and produce a DS9 region file
for f in coords:
data = open(f).readlines()
out = open(f, 'w')
reg = open(f.replace('.coords', '.reg'), 'w')
reg.write('#File written on {0:>s}\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))
for line in data:
if not line.startswith('#'):
out.write(line)
tmp = line.split()
reg.write('image;circle({0:>s},{1:>s},5)\n'.format(tmp[0], tmp[1]))
out.close()
reg.close()
#create a mapping file
out = open('regcatalog.txt', 'w')
for f in coords:
out.write('%s %s\n' % (f.replace('.coords', '.fits'), f))
out.close()
params = {'catfile': 'regcatalog.txt', 'shiftfile': True, 'outshifts': 'flt1_shifts.txt', 'updatehdr': True,
'verbose': False, 'minobj': 15, 'use2dhist': False, 'see2dplot': False,
'searchrad': 50, 'searchunits': 'pixels', 'tolerance': 50.0, 'separation': 30.0, 'nclip': 3}
tweakreg.TweakReg('*_flt.fits', editpars=False, **params)
params.update({'outshifts': 'flt_shifts.txt', 'searchrad': 15, 'tolerance': 3})
tweakreg.TweakReg('*_flt.fits', editpars=False, **params)
#params = {'updatehdr': True, 'verbose': False, 'minobj': 15, 'use2dhist': True, 'see2dplot': False,
# 'searchrad': 2.5, 'searchunits': 'pixels'}
#tweakreg.TweakReg('*_flt.fits', editpars=False, **params)
def registerPOLs(self):
"""
Aligns and registers the POL files. In addition, produces block averages files.
:return: None
"""
#copy the first round files to backup and check the shifts
drzs = glob.glob('*POL*_drz*.fits')
for drz in drzs:
shutil.move(drz, drz.replace('_drz.fits', '_backup.fits'))
params = {'outshifts': 'backup_shifts.txt', 'updatehdr': True,
'verbose': False, 'minobj': 20, 'use2dhist': True,
'residplot': 'residuals', 'see2dplot': False,
'searchrad': 35, 'searchunits': 'pixels'}
#we can do this twice to get the alignment really good
tweakreg.TweakReg('*_backup.fits', editpars=False, **params)
tweakreg.TweakReg('*_backup.fits', editpars=False, **params)
        #use sregister to resample the separate POL frames to a single WCS frame
sregister('POL*_backup.fits[1]', 'POL0V_backup.fits[1]', 'P0lin_sci.fits,P60lin_sci.fits,P120lin_sci.fits',
fitgeometry='rscale', calctype='double', interpolant='linear', fluxconserve=self.yes)
sregister('POL*_backup.fits[1]', 'POL0V_backup.fits[1]', 'P0drz_sci.fits,P60drz_sci.fits,P120drz_sci.fits',
fitgeometry='rscale', calctype='double', interpolant='drizzle', fluxconserve=self.yes)
sregister('POL*_backup.fits[2]', 'POL0V_backup.fits[1]', 'P0lin_wht.fits,P60lin_wht.fits,P120lin_wht.fits',
fitgeometry='rscale', calctype='double', interpolant='linear', fluxconserve=self.yes)
sregister('POL*_backup.fits[2]', 'POL0V_backup.fits[1]', 'P0drz_wht.fits,P60drz_wht.fits,P120drz_wht.fits',
fitgeometry='rscale', calctype='double', interpolant='drizzle', fluxconserve=self.yes)
#average in 10x10x10 blocks
blkavg('P0lin_sci.fits', 'P0lin_smt.fits', b1=10, b2=10, b3=10, option='average')
blkavg('P60lin_sci.fits', 'P60lin_smt.fits', b1=10, b2=10, b3=10, option='average')
blkavg('P120lin_sci.fits', 'P120lin_smt.fits', b1=10, b2=10, b3=10, option='average')
blkavg('P0drz_sci.fits', 'P0drz_smt.fits', b1=10, b2=10, b3=10, option='average')
blkavg('P60drz_sci.fits', 'P60drz_smt.fits', b1=10, b2=10, b3=10, option='average')
blkavg('P120drz_sci.fits', 'P120drz_smt.fits', b1=10, b2=10, b3=10, option='average')
def doFinalDrizzle(self):
"""
Does the final drizzling.
:return: None
"""
#we can now perform the final drizzle to drizzle all FLT images to POL frames
kwargs = {'final_pixfrac': 1.0, 'skysub': False,
'final_outnx': 2300, 'final_outny': 2300,
'final_ra': 128.8369, 'final_dec': -45.1791,
'updatewcs': False, 'final_wcs': True, 'preserve': False,
'build': True, 'final_fillval': 1.0, #'final_wht_type': 'ERR',
'final_refimage': 'jbj901akq_flt.fits[1]'}
for f in self.input:
astrodrizzle.AstroDrizzle(input=f, mdriztab=False, editpars=False, **kwargs)
def runAll(self):
"""
Runs all steps of the pipeline at one go.
:return: None
"""
self.createAssociations()
self.copyRaws()
self.omitPHOTCORR()
self.runCalACS()
self.destripeFLT()
#self.destripeFLTSMN()
self.updateHeader()
self.initialProcessing()
self.findImprovedAlignment()
self.registerPOLs()
self.doFinalDrizzle()
def processArgs(printHelp=False):
"""
Processes command line arguments.
"""
parser = OptionParser()
parser.add_option('-i', '--input',
dest='input',
help='Input association FILE to be processed [*POL*_asn.fits].',
metavar='string')
parser.add_option('-p', '--pipeline',
dest='pipeline',
action='store_true',
help='Use OPUS FLT files. Assumes that _flt files are in ./opus/ folder.',
metavar='boolean')
if printHelp:
parser.print_help()
else:
return parser.parse_args()
if __name__ == '__main__':
start = time()
opts, args = processArgs()
settings = dict(jref='/Users/sammy/Research/pulsar/data/12240/refs/',
mtab='/Users/sammy/Research/pulsar/data/12240/refs/')
#settings = dict(jref='/astro/data/puppis0/niemi/pulsar/data/12240/refs/',
# mtab='/astro/data/puppis0/niemi/pulsar/data/12240/refs/')
reduce = reduceACSWFCpoli(opts.input, **settings)
if opts.pipeline:
reduce.createAssociations()
reduce.copyFLTs()
reduce.destripeFLTSMN()
reduce.updateHeader()
reduce.initialProcessing()
reduce.findImprovedAlignment()
reduce.registerPOLs()
reduce.doFinalDrizzle()
else:
reduce.runAll()
elapsed = time() - start
print 'Processing took {0:.1f} minutes'.format(elapsed / 60.)
| bsd-2-clause |
jashwanth9/Expert-recommendation-system | code/content_tfidf_on_training.py | 1 | 2615 | import numpy as np
import pdb
from sklearn.naive_bayes import MultinomialNB
import collections
import cPickle as pickle
def loadData():
print "loading data"
question_keys = pickle.load(open('../features/question_info_keys.dat', 'rb'))
question_feats = {}
trainData = []
valData = []
# with open('../features/question_word_freq.txt', 'r') as f1:
# i = 0
# for line in f1:
# line = line.rstrip()
# wordfreq = map(int, line.split())
# question_feats[question_keys[i]] = wordfreq
# i += 1
tf = pickle.load(open('../features/ques_wordid_tfidf.dat', 'rb'))
tfx = tf.toarray()
for i in range(len(tfx)):
        question_feats[question_keys[i]] = tfx[i].tolist()  # tf-idf row for question i
with open('../train_data/invited_info_train.txt', 'r') as f1:
for line in f1:
line = line.rstrip('\n')
sp = line.split()
trainData.append((sp[0], sp[1], int(sp[2])))
with open('../train_data/validate_nolabel.txt', 'r') as f1:
for line in f1:
valData.append(line.rstrip('\r\n').split(','))
return question_feats, trainData, valData[1:]
def getModels(trainData, question_feats):
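    # Builds one MultinomialNB classifier per user: the feature vectors are the
    # question tf-idf rows collected in loadData(), and the targets are the 0/1
    # labels read from invited_info_train.txt for that user.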
print "getting models"
userX = {}
userY = {}
for qid, uid, val in trainData:
if uid not in userX:
userX[uid] = []
userY[uid] = []
userX[uid].append(question_feats[qid])
userY[uid].append(val)
nbmodels = {}
for user in userX:
nbmodels[user] = MultinomialNB()
nbmodels[user].fit(userX[user], userY[user])
#print "dumping"
#pickle.dump(nbmodels, open('../features/usermodels_queschartfidf.dat', 'wb'))
#exit()
return nbmodels
def getPredictions(valData, nbmodels, question_feats):
print "getting predictions"
predictions = []
for qid, uid in valData:
if uid not in nbmodels:
predictions.append(0)
continue
        prob = nbmodels[uid].predict_proba([question_feats[qid]])
        classes = list(nbmodels[uid].classes_)
        if 1 in classes:
            # probability of the positive label (1) for this user's model
            predictions.append(prob[0][classes.index(1)])
        else:
            # this user's model was trained on negative examples only
            predictions.append(0)
return predictions
question_feats, trainData, valData = loadData()
nbmodels = getModels(trainData, question_feats)
training_predictions = getPredictions([[x[0], x[1]] for x in trainData], nbmodels, question_feats)
predictions = getPredictions(valData, nbmodels, question_feats)
with open('../validation/contentbased_word_tfidf2train.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(training_predictions)):
f1.write(trainData[i][0]+','+trainData[i][1]+','+str(training_predictions[i])+'\n')
with open('../validation/contentbased_word_tfidf2.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(predictions)):
f1.write(valData[i][0]+','+valData[i][1]+','+str(predictions[i])+'\n')
| apache-2.0 |
h2educ/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
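# A typical LSA pipeline sketch (not part of this module; ``raw_documents`` is
# a placeholder for the user's text corpus):
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   from sklearn.preprocessing import Normalizer
#   from sklearn.pipeline import make_pipeline
#   lsa = make_pipeline(TfidfVectorizer(), TruncatedSVD(n_components=100),
#                       Normalizer(copy=False))
#   X_lsa = lsa.fit_transform(raw_documents)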
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
jm-begon/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition of the graph.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
    Parameters
    ----------
    dist : ndarray
        The input distances
    weights : {'uniform', 'distance' or a callable}
        The kind of weighting used
    Returns
    -------
    weights_arr : array of the same shape as ``dist``
        if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is larger
            # than the number of neighbors, the first NN will not
            # be the sample, but a duplicate.
            # In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
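        # each query point gets exactly n_neighbors stored entries, so the
        # CSR indptr is a uniform arange with step n_neighbors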
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
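# Illustrative sketch (added note, not part of the original mixin): the
# np.unique(..., return_inverse=True) call above maps arbitrary labels to
# indices into classes_. For a hypothetical single-output target:
#
#   y = np.array(['cat', 'dog', 'cat'])
#   classes, encoded = np.unique(y, return_inverse=True)
#   # classes -> array(['cat', 'dog']), encoded -> array([0, 1, 0])
#
# so _y stores integer codes while classes_ keeps the original labels.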
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
mtesseracttech/CustomEngine | lib/bullet/src/examples/pybullet/testrender_np.py | 3 | 1447 |
#make sure to compile pybullet with PYBULLET_USE_NUMPY enabled, otherwise use testrender.py (slower but compatible without numpy)
import numpy as np
import matplotlib.pyplot as plt
import pybullet
import time
pybullet.connect(pybullet.DIRECT)
pybullet.loadURDF("r2d2.urdf")
camTargetPos = [0,0,0]
cameraUp = [0,0,1]
cameraPos = [1,1,1]
yaw = 40
pitch = 10.0
roll=0
upAxisIndex = 2
camDistance = 4
pixelWidth = 1920
pixelHeight = 1080
nearPlane = 0.01
farPlane = 1000
fov = 60
main_start = time.time()
for pitch in range (0,360,10) :
start = time.time()
viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
aspect = pixelWidth / pixelHeight;
projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);
img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, [0,1,0])
stop = time.time()
print ("renderImage %f" % (stop - start))
w=img_arr[0] #width of the image, in pixels
h=img_arr[1] #height of the image, in pixels
rgb=img_arr[2] #color data RGB
dep=img_arr[3] #depth data
print 'width = %d height = %d' % (w,h)
#note that sending the data to matplotlib is really slow
#show
plt.imshow(rgb,interpolation='none')
#plt.show()
plt.pause(0.01)
main_stop = time.time()
print ("Total time %f" % (main_stop - main_start))
pybullet.resetSimulation()
| apache-2.0 |
mattcaldwell/zipline | tests/test_examples.py | 6 | 1612 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on a unittest written by John Salvatier:
# https://github.com/pymc-devs/pymc/blob/pymc3/tests/test_examples.py
# Disable plotting
#
import matplotlib
matplotlib.use('Agg')
import os
from os import path
try:
    from os.path import walk
except ImportError:
# Assume Python 3
from os import walk
import fnmatch
import imp
def test_examples():
os.chdir(example_dir())
for fname in all_matching_files(example_dir(), '*.py'):
yield check_example, fname
def all_matching_files(d, pattern):
def addfiles(fls, dir, nfiles):
nfiles = fnmatch.filter(nfiles, pattern)
nfiles = [path.join(dir, f) for f in nfiles]
fls.extend(nfiles)
files = []
for dirpath, dirnames, filenames in walk(d):
addfiles(files, dirpath, filenames)
return files
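# Added note: for a hypothetical tree containing a.py, sub/b.py and c.txt,
# all_matching_files(root, '*.py') would return [root + '/a.py', root + '/sub/b.py'],
# so each example script under the examples directory becomes its own test case.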
def example_dir():
import zipline
d = path.dirname(zipline.__file__)
return path.join(path.abspath(d), 'examples/')
def check_example(p):
imp.load_source('__main__', path.basename(p))
| apache-2.0 |
dsullivan7/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
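    # (added note) for a linear SVM the geometric margin is 1 / ||w||; shifting
    # the line vertically by +/- sqrt(1 + a**2) * margin places the two parallel
    # boundaries that pass through the support vectors.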
    yy_down = yy - np.sqrt(1 + a ** 2) * margin
    yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
adykstra/mne-python | mne/stats/tests/test_cluster_level.py | 2 | 24402 | # Authors: Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
import os
import numpy as np
from scipy import sparse, linalg, stats
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal)
import pytest
from mne.parallel import _force_serial
from mne.stats.cluster_level import (permutation_cluster_test, f_oneway,
permutation_cluster_1samp_test,
spatio_temporal_cluster_test,
spatio_temporal_cluster_1samp_test,
ttest_1samp_no_p, summarize_clusters_stc)
from mne.utils import run_tests_if_main, _TempDir, catch_logging
n_space = 50
def _get_conditions():
noise_level = 20
n_time_1 = 20
n_time_2 = 13
normfactor = np.hanning(20).sum()
rng = np.random.RandomState(42)
condition1_1d = rng.randn(n_time_1, n_space) * noise_level
for c in condition1_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
condition2_1d = rng.randn(n_time_2, n_space) * noise_level
for c in condition2_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
pseudoekp = 10 * np.hanning(25)[None, :]
condition1_1d[:, 25:] += pseudoekp
condition2_1d[:, 25:] -= pseudoekp
condition1_2d = condition1_1d[:, :, np.newaxis]
condition2_2d = condition2_1d[:, :, np.newaxis]
return condition1_1d, condition2_1d, condition1_2d, condition2_2d
def test_thresholds():
"""Test automatic threshold calculations."""
# within subjects
rng = np.random.RandomState(0)
X = rng.randn(10, 1, 1) + 0.08
want_thresh = -stats.t.ppf(0.025, len(X) - 1)
assert 0.03 < stats.ttest_1samp(X[:, 0, 0], 0)[1] < 0.05
my_fun = partial(ttest_1samp_no_p)
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match='threshold is only valid'):
out = permutation_cluster_1samp_test(X, stat_fun=my_fun,
verbose=True)
log = log.getvalue()
assert str(want_thresh)[:6] in log
assert len(out[1]) == 1 # 1 cluster
assert 0.03 < out[2] < 0.05
# between subjects
Y = rng.randn(10, 1, 1)
Z = rng.randn(10, 1, 1) - 0.7
X = [X, Y, Z]
want_thresh = stats.f.ppf(1. - 0.05, 2, sum(len(a) for a in X) - len(X))
p = stats.f_oneway(*X)[1]
assert 0.03 < p < 0.05
my_fun = partial(f_oneway) # just to make the check fail
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match='threshold is only valid'):
out = permutation_cluster_test(X, tail=1, stat_fun=my_fun,
verbose=True)
log = log.getvalue()
assert str(want_thresh)[:6] in log
assert len(out[1]) == 1 # 1 cluster
assert 0.03 < out[2] < 0.05
with pytest.warns(RuntimeWarning, match='Ignoring argument "tail"'):
permutation_cluster_test(X, tail=0)
def test_cache_dir():
"""Test use of cache dir."""
tempdir = _TempDir()
orig_dir = os.getenv('MNE_CACHE_DIR', None)
orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
rng = np.random.RandomState(0)
X = rng.randn(9, 2, 10)
try:
os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
os.environ['MNE_CACHE_DIR'] = tempdir
# Fix error for #1507: in-place when memmapping
with catch_logging() as log_file:
permutation_cluster_1samp_test(
X, buffer_size=None, n_jobs=2, n_permutations=1,
seed=0, stat_fun=ttest_1samp_no_p, verbose=False)
assert 'independently' not in log_file.getvalue()
# ensure that non-independence yields warning
stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
with pytest.warns(RuntimeWarning, match='independently'):
permutation_cluster_1samp_test(
X, buffer_size=10, n_jobs=2, n_permutations=1,
seed=0, stat_fun=stat_fun, verbose=False)
finally:
if orig_dir is not None:
os.environ['MNE_CACHE_DIR'] = orig_dir
else:
del os.environ['MNE_CACHE_DIR']
if orig_size is not None:
os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
else:
del os.environ['MNE_MEMMAP_MIN_SIZE']
def test_permutation_large_n_samples():
"""Test that non-replacement works with large N."""
X = np.random.RandomState(0).randn(72, 1) + 1
for n_samples in (11, 72):
tails = (0, 1) if n_samples <= 20 else (0,)
for tail in tails:
H0 = permutation_cluster_1samp_test(
X[:n_samples], threshold=1e-4, tail=tail)[-1]
assert H0.shape == (1024,)
assert len(np.unique(H0)) >= 1024 - (H0 == 0).sum()
def test_permutation_step_down_p():
"""Test cluster level permutations with step_down_p."""
try:
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph # noqa: F401,E501 analysis:ignore
except ImportError:
return
rng = np.random.RandomState(0)
# subjects, time points, spatial points
X = rng.randn(9, 2, 10)
# add some significant points
X[:, 0:2, 0:2] += 2 # span two time points and two spatial points
X[:, 1, 5:9] += 0.5 # span four time points with 4x smaller amplitude
thresh = 2
# make sure it works when we use ALL points in step-down
t, clusters, p, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=1.0)
# make sure using step-down will actually yield improvements sometimes
t, clusters, p_old, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=0.0)
assert_equal(np.sum(p_old < 0.05), 1) # just spatial cluster
t, clusters, p_new, H0 = \
permutation_cluster_1samp_test(X, threshold=thresh,
step_down_p=0.05)
assert_equal(np.sum(p_new < 0.05), 2) # time one rescued
assert np.all(p_old >= p_new)
def test_cluster_permutation_test():
"""Test cluster level permutations tests."""
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
for condition1, condition2 in zip((condition1_1d, condition1_2d),
(condition2_1d, condition2_2d)):
T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
[condition1, condition2], n_permutations=100, tail=1, seed=1,
buffer_size=None)
assert_equal(np.sum(cluster_p_values < 0.05), 1)
T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
[condition1, condition2], n_permutations=100, tail=1, seed=1,
buffer_size=None)
assert_equal(np.sum(cluster_p_values < 0.05), 1)
# test with 2 jobs and buffer_size enabled
buffer_size = condition1.shape[1] // 10
T_obs, clusters, cluster_p_values_buff, hist =\
permutation_cluster_test([condition1, condition2],
n_permutations=100, tail=1, seed=1,
n_jobs=2, buffer_size=buffer_size)
assert_array_equal(cluster_p_values, cluster_p_values_buff)
def stat_fun(X, Y):
return stats.f_oneway(X, Y)[0]
with pytest.warns(RuntimeWarning, match='is only valid'):
permutation_cluster_test([condition1, condition2], n_permutations=1,
stat_fun=stat_fun)
def test_cluster_permutation_t_test():
"""Test cluster level permutations T-test."""
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
# use a very large sigma to make sure Ts are not independent
stat_funs = [ttest_1samp_no_p,
partial(ttest_1samp_no_p, sigma=1e-1)]
for stat_fun in stat_funs:
for condition1 in (condition1_1d, condition1_2d):
# these are so significant we can get away with fewer perms
T_obs, clusters, cluster_p_values, hist =\
permutation_cluster_1samp_test(condition1, n_permutations=100,
tail=0, seed=1,
buffer_size=None)
assert_equal(np.sum(cluster_p_values < 0.05), 1)
T_obs_pos, c_1, cluster_p_values_pos, _ =\
permutation_cluster_1samp_test(condition1, n_permutations=100,
tail=1, threshold=1.67, seed=1,
stat_fun=stat_fun,
buffer_size=None)
T_obs_neg, _, cluster_p_values_neg, _ =\
permutation_cluster_1samp_test(-condition1, n_permutations=100,
tail=-1, threshold=-1.67,
seed=1, stat_fun=stat_fun,
buffer_size=None)
assert_array_equal(T_obs_pos, -T_obs_neg)
assert_array_equal(cluster_p_values_pos < 0.05,
cluster_p_values_neg < 0.05)
# test with 2 jobs and buffer_size enabled
buffer_size = condition1.shape[1] // 10
with pytest.warns(None): # sometimes "independently"
T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \
permutation_cluster_1samp_test(
-condition1, n_permutations=100, tail=-1,
threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun,
buffer_size=buffer_size)
assert_array_equal(T_obs_neg, T_obs_neg_buff)
assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)
def test_cluster_permutation_with_connectivity():
"""Test cluster level permutations with connectivity matrix."""
try:
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
except ImportError:
return
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
n_pts = condition1_1d.shape[1]
# we don't care about p-values in any of these, so do fewer permutations
args = dict(seed=None, max_step=1, exclude=None,
step_down_p=0, t_power=1, threshold=1.67,
check_disjoint=False, n_permutations=50)
did_warn = False
for X1d, X2d, func, spatio_temporal_func in \
[(condition1_1d, condition1_2d,
permutation_cluster_1samp_test,
spatio_temporal_cluster_1samp_test),
([condition1_1d, condition2_1d],
[condition1_2d, condition2_2d],
permutation_cluster_test,
spatio_temporal_cluster_test)]:
out = func(X1d, **args)
connectivity = grid_to_graph(1, n_pts)
out_connectivity = func(X1d, connectivity=connectivity, **args)
assert_array_equal(out[0], out_connectivity[0])
for a, b in zip(out_connectivity[1], out[1]):
assert_array_equal(out[0][a], out[0][b])
assert np.all(a[b])
# test spatio-temporal w/o time connectivity (repeat spatial pattern)
connectivity_2 = sparse.coo_matrix(
linalg.block_diag(connectivity.asfptype().todense(),
connectivity.asfptype().todense()))
if isinstance(X1d, list):
X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
else:
X1d_2 = np.concatenate((X1d, X1d), axis=1)
out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)
# make sure we were operating on the same values
split = len(out[0])
assert_array_equal(out[0], out_connectivity_2[0][:split])
assert_array_equal(out[0], out_connectivity_2[0][split:])
# make sure we really got 2x the number of original clusters
n_clust_orig = len(out[1])
assert len(out_connectivity_2[1]) == 2 * n_clust_orig
# Make sure that we got the old ones back
data_1 = {np.sum(out[0][b[:n_pts]]) for b in out[1]}
data_2 = {np.sum(out_connectivity_2[0][a]) for a in
out_connectivity_2[1][:]}
assert len(data_1.intersection(data_2)) == len(data_1)
# now use the other algorithm
if isinstance(X1d, list):
X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
else:
X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))
out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,
connectivity=connectivity,
max_step=0, threshold=1.67,
check_disjoint=True)
# make sure we were operating on the same values
split = len(out[0])
assert_array_equal(out[0], out_connectivity_3[0][0])
assert_array_equal(out[0], out_connectivity_3[0][1])
# make sure we really got 2x the number of original clusters
assert len(out_connectivity_3[1]) == 2 * n_clust_orig
# Make sure that we got the old ones back
data_1 = {np.sum(out[0][b[:n_pts]]) for b in out[1]}
data_2 = {np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
out_connectivity_3[1]}
assert len(data_1.intersection(data_2)) == len(data_1)
# test new versus old method
out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50,
connectivity=connectivity,
max_step=2, threshold=1.67)
out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50,
connectivity=connectivity,
max_step=1, threshold=1.67)
# clusters could be in a different order
sums_4 = [np.sum(out_connectivity_4[0][a])
for a in out_connectivity_4[1]]
sums_5 = [np.sum(out_connectivity_4[0][a])
for a in out_connectivity_5[1]]
sums_4 = np.sort(sums_4)
sums_5 = np.sort(sums_5)
assert_array_almost_equal(sums_4, sums_5)
if not _force_serial:
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
n_permutations=1, connectivity=connectivity,
max_step=1, threshold=1.67, n_jobs=-1000)
# not enough TFCE params
pytest.raises(KeyError, spatio_temporal_func, X1d_3,
connectivity=connectivity, threshold=dict(me='hello'))
# too extreme a start threshold
with pytest.warns(None) as w:
spatio_temporal_func(X1d_3, connectivity=connectivity,
threshold=dict(start=10, step=1))
if not did_warn:
assert len(w) == 1
did_warn = True
# too extreme a start threshold
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
connectivity=connectivity, tail=-1,
threshold=dict(start=1, step=-1))
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
connectivity=connectivity, tail=-1,
threshold=dict(start=-1, step=1))
# Make sure connectivity has to be sparse
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
n_permutations=50, connectivity=connectivity.todense(),
max_step=1, threshold=1.67)
# wrong type for threshold
pytest.raises(TypeError, spatio_temporal_func, X1d_3,
connectivity=connectivity, threshold=[])
# wrong value for tail
with pytest.warns(None): # sometimes ignoring tail
pytest.raises(ValueError, spatio_temporal_func, X1d_3,
connectivity=connectivity, tail=2)
# make sure it actually found a significant point
out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50,
connectivity=connectivity,
max_step=1,
threshold=dict(start=1,
step=1))
assert np.min(out_connectivity_6[2]) < 0.05
def test_permutation_connectivity_equiv():
"""Test cluster level permutations with and without connectivity."""
try:
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
except ImportError:
return
rng = np.random.RandomState(0)
# subjects, time points, spatial points
n_time = 2
n_space = 4
X = rng.randn(6, n_time, n_space)
# add some significant points
X[:, :, 0:2] += 10 # span two time points and two spatial points
X[:, 1, 3] += 20 # span one time point
max_steps = [1, 1, 1, 2, 1]
# This will run full algorithm in two ways, then the ST-algorithm in 2 ways
# All of these should give the same results
conns = [None,
grid_to_graph(n_time, n_space),
grid_to_graph(1, n_space),
grid_to_graph(1, n_space),
None]
stat_map = None
thresholds = [2, 2, 2, 2, dict(start=0.01, step=1.0)]
sig_counts = [2, 2, 2, 2, 5]
stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
cs = None
ps = None
for thresh, count, max_step, conn in zip(thresholds, sig_counts,
max_steps, conns):
t, clusters, p, H0 = \
permutation_cluster_1samp_test(
X, threshold=thresh, connectivity=conn, n_jobs=2,
max_step=max_step, stat_fun=stat_fun)
# make sure our output datatype is correct
assert isinstance(clusters[0], np.ndarray)
assert clusters[0].dtype == bool
assert_array_equal(clusters[0].shape, X.shape[1:])
# make sure all comparisons were done; for TFCE, no perm
# should come up empty
inds = np.where(p < 0.05)[0]
assert_equal(len(inds), count)
if isinstance(thresh, dict):
assert_equal(len(clusters), n_time * n_space)
assert np.all(H0 != 0)
continue
this_cs = [clusters[ii] for ii in inds]
this_ps = p[inds]
this_stat_map = np.zeros((n_time, n_space), dtype=bool)
for ci, c in enumerate(this_cs):
if isinstance(c, tuple):
this_c = np.zeros((n_time, n_space), bool)
for x, y in zip(c[0], c[1]):
this_stat_map[x, y] = True
this_c[x, y] = True
this_cs[ci] = this_c
c = this_c
this_stat_map[c] = True
if cs is None:
ps = this_ps
cs = this_cs
if stat_map is None:
stat_map = this_stat_map
assert_array_equal(ps, this_ps)
assert len(cs) == len(this_cs)
for c1, c2 in zip(cs, this_cs):
assert_array_equal(c1, c2)
assert_array_equal(stat_map, this_stat_map)
def test_spatio_temporal_cluster_connectivity():
"""Test spatio-temporal cluster permutations."""
try:
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
except ImportError:
return
condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
_get_conditions()
rng = np.random.RandomState(0)
noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])
noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])
conn = grid_to_graph(data1_2d.shape[-1], 1)
threshold = dict(start=4.0, step=2)
T_obs, clusters, p_values_conn, hist = \
spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn,
n_permutations=50, tail=1, seed=1,
threshold=threshold, buffer_size=None)
buffer_size = data1_2d.size // 10
T_obs, clusters, p_values_no_conn, hist = \
spatio_temporal_cluster_test([data1_2d, data2_2d],
n_permutations=50, tail=1, seed=1,
threshold=threshold, n_jobs=2,
buffer_size=buffer_size)
assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05))
# make sure results are the same without buffer_size
T_obs, clusters, p_values2, hist2 = \
spatio_temporal_cluster_test([data1_2d, data2_2d],
n_permutations=50, tail=1, seed=1,
threshold=threshold, n_jobs=2,
buffer_size=None)
assert_array_equal(p_values_no_conn, p_values2)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=1, threshold=-2.)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=-1, threshold=2.)
pytest.raises(ValueError, spatio_temporal_cluster_test,
[data1_2d, data2_2d], tail=0, threshold=-1)
def ttest_1samp(X):
"""Return T-values."""
return stats.ttest_1samp(X, 0)[0]
def test_summarize_clusters():
"""Test cluster summary stcs."""
clu = (np.random.random([1, 20484]),
[(np.array([0]), np.array([0, 2, 4]))],
np.array([0.02, 0.1]),
np.array([12, -14, 30]))
stc_sum = summarize_clusters_stc(clu)
assert stc_sum.data.shape[1] == 2
clu[2][0] = 0.3
pytest.raises(RuntimeError, summarize_clusters_stc, clu)
def test_permutation_test_H0():
"""Test that H0 is populated properly during testing."""
rng = np.random.RandomState(0)
data = rng.rand(7, 10, 1) - 0.5
with pytest.warns(RuntimeWarning, match='No clusters found'):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=100, n_permutations=1024, seed=rng)
assert_equal(len(h0), 0)
for n_permutations in (1024, 65, 64, 63):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=0.1, n_permutations=n_permutations, seed=rng)
assert_equal(len(h0), min(n_permutations, 64))
assert isinstance(clust[0], tuple) # sets of indices
for tail, thresh in zip((-1, 0, 1), (-0.1, 0.1, 0.1)):
t, clust, p, h0 = spatio_temporal_cluster_1samp_test(
data, threshold=thresh, seed=rng, tail=tail, out_type='mask')
assert isinstance(clust[0], np.ndarray) # bool mask
# same as "128 if tail else 64"
assert_equal(len(h0), 2 ** (7 - (tail == 0))) # exact test
def test_tfce_thresholds():
"""Test TFCE thresholds."""
rng = np.random.RandomState(0)
data = rng.randn(7, 10, 1) - 0.5
# if tail==-1, step must also be negative
pytest.raises(ValueError, permutation_cluster_1samp_test, data, tail=-1,
threshold=dict(start=0, step=0.1))
# this works (smoke test)
permutation_cluster_1samp_test(data, tail=-1,
threshold=dict(start=0, step=-0.1))
# thresholds must be monotonically increasing
pytest.raises(ValueError, permutation_cluster_1samp_test, data, tail=1,
threshold=dict(start=1, step=-0.5))
run_tests_if_main()
| bsd-3-clause |
pierreazalbert/Sexual-Keiling | device_src/self-update-graph.py | 1 | 8230 | import paho.mqtt.client as mqtt
import json
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.dates as dates
from termcolor import colored, cprint
# global dictionnary to access message from all functions easily
global data
data = {}
# global pandas dataframe to access data log from all functions easily
global df
df = pd.DataFrame()
# initialise live plot
fig, [ax1, ax2, ax3] = plt.subplots(3, 1, sharex=True, sharey=False)
fig.set_dpi(300)
fig.set_size_inches(15,8)
fig.suptitle('Tempo.o Demo', fontsize=20)
plt.subplots_adjust(left=0.1, right=0.85)
plt.style.use('bmh')
fig.patch.set_facecolor('white')
# runs when successful connection to server is made
def on_connect(client, userdata, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(topic_prefix)
print("subscribed to", topic_prefix)
# runs when message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
msg_string = msg.payload.decode('UTF-8')
message = json.loads(msg_string)
global data
data = { message['datetime'] : {key: message[key] for key in ['humi', 'temp', 'max_accel']} }
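    # e.g. a payload such as (hypothetical values)
    #   {"datetime": "2017-01-20 12:00:00", "humi": 48.0, "temp": 22.5, "max_accel": 5.0}
    # becomes
    #   {'2017-01-20 12:00:00': {'humi': 48.0, 'temp': 22.5, 'max_accel': 5.0}}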
print('\n******************************************')
print('Welcome to the Sexual Keiling MQTT monitor')
print('******************************************\n')
print('Are you connected to the EEERover wifi?')
broker_address = '192.168.0.10'
#broker_address = 'iot.eclipse.org' # test server
topic_prefix = 'esys/sexual-keiling'
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# todo check paho doc for a way to save this from infinite loop if no connection is made
client.connect(broker_address)
# function to determine message to show according to humidity data received
def humidity_status():
if df['humi'].iloc[-1] > 65:
status = 'Too humid'
color = 'red'
elif df['humi'].iloc[-1] > 55:
status = 'Slightly humid'
color = 'orange'
elif df['humi'].iloc[-1] > 45:
status = 'OK'
color = 'green'
elif df['humi'].iloc[-1] > 35:
status = 'Slightly dry'
color = 'orange'
else:
status = 'Too dry'
color = 'red'
if df['humi_var'].iloc[-1] > 40:
status = 'Unstable'
color = 'orange'
return status, color
# function to determine message to show according to accelerometer data received
def movement_status():
if df['accel_var'].iloc[-1] < -50:
status = 'Impact'
color = 'red'
text = ("IMPACT RECORDED AT " + str(df['accel_var'].index[-1]))
text = colored(text, 'red', attrs=['bold', 'blink'])
print(text)
elif df['max_accel'].iloc[-1] > 60:
status = 'Shaking'
color = 'orange'
else:
status = 'OK'
color = 'green'
return status, color
# function to determine message to show according to temperature data received
def temperature_status():
if df['temp'].iloc[-1] > 30:
status = 'Too hot'
color = 'red'
elif df['temp'].iloc[-1] > 25:
status = 'Slightly hot'
color = 'orange'
elif df['temp'].iloc[-1] > 20:
status = 'OK'
color = 'green'
elif df['temp'].iloc[-1] > 15:
status = 'Slightly cold'
color = 'orange'
else:
status = 'Too cold'
color = 'red'
if df['temp_var'].iloc[-1] > 2:
status = 'Unstable'
color = 'orange'
return status, color
# matplotlib function that will be used to update the graph
def animate(i):
global df
# get messages from mqtt broker with 1s timeout
client.loop(1.0)
# append each new message to pandas dataframe
newline = pd.DataFrame.from_dict(data, orient='index')
df = df.append(newline)
df.index = pd.DatetimeIndex(df.index)
    # cap the stored history (24*60*60*60 rows, roughly 24 hours at 60 samples per second)
if len(df.index) > 24*60*60*60:
df.drop(df.iloc[0].name, inplace=True)
if df.empty is False:
df.drop_duplicates()
# calculate instantaneous derivatives of each signal
df['humi_var'] = df['humi'].diff().apply(lambda x: x**2).rolling(50).max()
df['temp_var'] = df['temp'].diff().apply(lambda x: x**2).rolling(50).max()
df['accel_var'] = df['max_accel'].diff()#.rolling(5).std()
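        # (added note) humi_var and temp_var are rolling maxima of the squared
        # first difference -- a simple instability measure used by the *_status()
        # helpers -- while accel_var keeps the raw first difference so that a large
        # negative drop can be flagged as an impact.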
# clear whole figure before re-drawing
ax1.clear()
ax2.clear()
ax3.clear()
# plot dataframe containing humidity, temperature and acceleration data
df['humi'].plot(ax=[ax1], subplots=True)
df['max_accel'].plot(ax=[ax2], subplots=True)
df['temp'].plot(ax=[ax3], subplots=True)
#df['humi_var'].plot(ax=[ax1], subplots=True, secondary_y=True)
#df['accel_var'].plot(ax=[ax2], subplots=True, secondary_y=True)
#df['temp_var'].plot(ax=[ax3], subplots=True, secondary_y=True)
# format axis 1 - humidity
ax1.lines[0].set_linewidth(5)
ax1.set_ylim(0,100)
ax1.set_ylabel('Humidity (%)', fontsize=12)
ax1.legend().set_visible(False)
#ax1.right_ax.yaxis.set_visible(False)
ax1.axhspan(0,35, facecolor='r', alpha=0.3)
ax1.axhspan(35,45, facecolor='orange', alpha=0.3)
ax1.axhspan(45,55, facecolor='g', alpha=0.3)
ax1.axhspan(55,65, facecolor='orange', alpha=0.3)
ax1.axhspan(65,100, facecolor='r', alpha=0.3)
# format axis 2 - acceleration
ax2.lines[0].set_linewidth(5)
ax2.set_ylabel('Acceleration (%)', fontsize=12)
ax2.legend().set_visible(False)
#ax2.right_ax.yaxis.set_visible(False)
ax2.axhspan(0,20, facecolor='green', alpha=0.3)
ax2.axhspan(20,60, facecolor='orange', alpha=0.3)
ax2.axhspan(60,ax2.get_ylim()[1], facecolor='red', alpha=0.3)
# format axis 3 - temperature
ax3.lines[0].set_linewidth(5)
ax3.set_ylabel('Temperature (%)', fontsize=12)
ax3.set_xlabel('Time', fontsize=15)
ax3.legend().set_visible(False)
#ax3.right_ax.yaxis.set_visible(False)
ax3.axhspan(0,15, facecolor='r', alpha=0.3)
ax3.axhspan(15,20, facecolor='orange', alpha=0.3)
ax3.axhspan(20,25, facecolor='g', alpha=0.3)
ax3.axhspan(25,30, facecolor='orange', alpha=0.3)
ax3.axhspan(30,ax3.get_ylim()[1], facecolor='r', alpha=0.3)
# display status message next to humidity subplot
status, color = humidity_status()
ax1.text(1.02, 0.6, 'Humidity',
verticalalignment='center', horizontalalignment='left',
transform=ax1.transAxes,
color='black', fontsize=20)
ax1.text(1.02, 0.4, status,
verticalalignment='center', horizontalalignment='left',
transform=ax1.transAxes,
color=color, fontsize=20)
# display status message next to movement subplot
status, color = movement_status()
ax2.text(1.02, 0.6, 'Movement',
verticalalignment='center', horizontalalignment='left',
transform=ax2.transAxes,
color='black', fontsize=20)
ax2.text(1.02, 0.4, status,
verticalalignment='center', horizontalalignment='left',
transform=ax2.transAxes,
color=color, fontsize=20)
# display status message next to temperature subplot
status, color = temperature_status()
ax3.text(1.02, 0.6, 'Temperature',
verticalalignment='center', horizontalalignment='left',
transform=ax3.transAxes,
color='black', fontsize=20)
ax3.text(1.02, 0.4, status,
verticalalignment='center', horizontalalignment='left',
transform=ax3.transAxes,
color=color, fontsize=20)
else:
print('Waiting for data...')
# animate plot by updating it using the animate() function every 0.5 seconds
ani = animation.FuncAnimation(fig, animate, interval=500)
plt.show()
| gpl-3.0 |
KirtoXX/Security_Camera | SSD_mobilenet.py | 1 | 3677 | import tensorflow as tf
from ssd_mobilenet.object_detection.utils import visualization_utils as vis_util
import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from scipy import misc
class Detection_api:
def __init__(self):
MODEL_NAME = 'ssd_mobilenet/ssd_mobilenet'
self.PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
self.NUM_CLASSES = 90
self.detection_graph = tf.Graph()
def buid_ssdmobilenet(self):
#----------build graph------------
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph)
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
#---------init_sess----------------
print('ssd_mobilenet load finish!')
def load_image_into_numpy_array(self,image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def inference(self,image_np_expanded):
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
return boxes,scores,classes
def decode_inference(self,boxes,scores,classes):
classes_np = np.squeeze(classes).astype(np.int32)
boxes_np = np.squeeze(boxes)
scores_np = np.squeeze(scores)
persion_id = []
for i in range(len(classes_np)):
if classes_np[i] == 1:
persion_id.append(i)
persion_boxs = boxes_np[persion_id, :]
persion_score = scores_np[persion_id]
persion_class = np.squeeze(persion_id)
return persion_boxs,persion_score,persion_class
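        # (added note) in the standard COCO label map class id 1 is 'person',
        # which is why only detections with class 1 are kept above.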
def detectFaces(self, image_path):
#-------read_image--------
image = Image.open(image_path)
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
#------do inference--------
boxes, scores, classes = self.inference(image_np_expanded)
boxes, scores, classes = self.decode_inference(boxes, scores, classes)
#------viual---------------
nb_persion = vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
boxes,
classes,
scores,
#category_index,
use_normalized_coordinates=True,
line_thickness=8)
return nb_persion,image_np
def main():
model = Detection_api()
model.buid_ssdmobilenet()
nb,image_np = model.detectFaces('test_image/001.jpg')
misc.imsave('temp/temp.jpg',image_np)
print(nb)
if __name__ == '__main__':
main()
| apache-2.0 |
adammenges/statsmodels | examples/incomplete/dates.py | 29 | 1251 | """
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
| bsd-3-clause |
JsNoNo/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
ahadmushir/whatsCooking | xg.py | 1 | 6416 | #using xgboost
import pandas as pd
import xgboost as xgb
import difflib
from sklearn.preprocessing import LabelEncoder
import numpy as np
import matplotlib.pyplot as plt
from fuzzywuzzy import fuzz
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_extraction.text import TfidfVectorizer
# Load the data
train_df = pd.read_csv('newXgTrain.csv', header=0)
test_df = pd.read_csv('newXgTest.csv', header=0)
# We'll impute missing values using the median for numeric columns and the most
# common value for string columns.
# This is based on some nice code by 'sveitser' at http://stackoverflow.com/a/25562948
from sklearn.base import TransformerMixin
class DataFrameImputer(TransformerMixin):
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].median() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
#feature handling
# a = train_df.columns
# feature_columns_to_use = list()
# a1 = a[2:]
# la = list()
# ov = list()
# for i in a1:
# for j in a1:
# d = fuzz.token_sort_ratio(i,j)
# if d != 100 and d > 83:
# print i,j
# if i not in la and j not in la:
# la.append([i,j])
# for i in la:
# for j in i:
# if j not in ov:
# ov.append(j)
# train_df['new'] = train_df[la[0][0]] | train_df[la[0][1]]
# train_df['new1'] = train_df[la[1][0]] | train_df[la[1][1]]
# train_df['new2'] = train_df[la[2][0]] | train_df[la[2][1]]
# train_df['new3'] = train_df[la[3][0]] | train_df[la[3][1]]
# train_df['new4'] = train_df[la[4][0]] | train_df[la[4][1]]
# train_df['new5'] = train_df[la[5][0]] | train_df[la[5][1]]
# train_df['new6'] = train_df[la[6][0]] | train_df[la[6][1]]
# train_df['new7'] = train_df[la[7][0]] | train_df[la[7][1]]
# train_df['new8'] = train_df[la[8][0]] | train_df[la[8][1]]
# train_df['new9'] = train_df[la[9][0]] | train_df[la[9][1]]
# train_df['new10'] = train_df[la[10][0]] | train_df[la[10][1]]
# train_df['new11'] = train_df[la[11][0]] | train_df[la[11][1]]
# train_df['new12'] = train_df[la[12][0]] | train_df[la[12][1]]
# train_df['new13'] = train_df[la[13][0]] | train_df[la[13][1]]
# train_df['new14'] = train_df[la[14][0]] | train_df[la[14][1]]
# train_df['new15'] = train_df[la[15][0]] | train_df[la[15][1]]
# train_df['new16'] = train_df[la[16][0]] | train_df[la[16][1]]
# train_df['new17'] = train_df[la[17][0]] | train_df[la[17][1]]
# train_df['new18'] = train_df[la[18][0]] | train_df[la[18][1]]
# train_df['new19'] = train_df[la[19][0]] | train_df[la[19][1]]
# v = a[321:]
# cc = 2
# while cc != len(train_df.columns):
# if a[cc] not in ov:
# feature_columns_to_use.append(a[cc])
# cc = cc + 1
# print len
# print b
feature_columns_to_use = list()
aData = train_df.columns
a1 = aData[2:]
la = list()
ov = list()
####
#Trying to merge ingredients which are similar (without the use of lemmatizer)
for i in a1:
for j in a1:
d = fuzz.token_sort_ratio(i,j)
if d != 100 and d > 75:
print i,j
if i not in la and j not in la:
la.append([i,j])
for i in la:
for j in i:
if j not in ov:
ov.append(j)
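# Added note: fuzz.token_sort_ratio is word-order insensitive, e.g.
# fuzz.token_sort_ratio('black olives', 'olives black') == 100, while close
# variants such as 'garlic clove' vs 'garlic cloves' score just under 100;
# the 75 < score < 100 window above targets that kind of near-duplicate
# (the example strings here are assumptions, not taken from the dataset).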
print 'number of similar ingredient columns to merge', len(ov)
print 'initial cols', len(train_df.columns)
ran = range(len(ov))
####
##for train
cc = 0
while cc != len(la):
    cStr = str(cc)
    train_df[cStr] = train_df[la[cc][0]] | train_df[la[cc][1]]
    cc = cc + 1
##for test
cc = 0
while cc != len(la):
    cStr = str(cc)
    test_df[cStr] = test_df[la[cc][0]] | test_df[la[cc][1]]
    cc = cc + 1
print train_df.columns[len(ov)+1:len(train_df.columns)-1]
print 'aggregated columns',len(train_df.columns)
a = train_df.columns
ccc = 2
while ccc != len(a):
if a[ccc] not in ov:
feature_columns_to_use.append(a[ccc])
ccc = ccc + 1
print 'final length of cols',len(feature_columns_to_use)
# print len(feature_columns_to_use)
# feature_columns_to_use = ['salt','onions','garlic','olive oil','sugar','water','soy sauce','carrots','butter','garlic cloves', 'ground black pepper', 'eggs', 'vegetable oil']
# feature_columns_to_use = ['Pclass','Sex','Age','Fare','Parch']
# nonnumeric_columns = ['Sex']
# Join the features from train and test together before imputing missing values,
# in case their distribution is slightly different
big_X = train_df[feature_columns_to_use].append(test_df[feature_columns_to_use])
big_X_imputed = DataFrameImputer().fit_transform(big_X)
# XGBoost doesn't (yet) handle categorical features automatically, so we need to change
# them to columns of integer values.
# le = LabelEncoder()
# for feature in nonnumeric_columns:
# big_X_imputed[feature] = le.fit_transform(big_X_imputed[feature])
# Prepare the inputs for the model
train_X = big_X_imputed[0:train_df.shape[0]].as_matrix()
test_X = big_X_imputed[train_df.shape[0]::].as_matrix()
train_y = train_df['class']
gbm = xgb.XGBClassifier(max_depth=15, n_estimators=700, learning_rate=0.05).fit(train_X, train_y)
predictions = gbm.predict(test_X)
# clf = xgb.XGBClassifier(n_estimators=200)
# eval_set = [(train_X,train_y)]
# clf.fit(train_X, train_y, eval_metric="auc")
# mapFeat = dict(zip(["f"+str(i) for i in range(len(feature_columns_to_use))],feature_columns_to_use))
# ts = pd.Series(gbm.booster().get_fscore())
# ts.index = ts.reset_index()['index'].map(mapFeat)
# ts.order()[-15:].plot(kind="barh", title=("features importance"))
# plt.show()
# Kaggle needs the submission to have a certain format;
# see https://www.kaggle.com/c/titanic-gettingStarted/download/gendermodel.csv
# for an example of what it's supposed to look like.
# corpustr = train_df[feature_columns_to_use]
# corr = train_df['class']
# vectorizertr = TfidfVectorizer(stop_words='english', ngram_range = ( 1, 1),analyzer="word",
# max_df = .6 , binary=False , token_pattern=r'\w+' , sublinear_tf=False, norm = 'l2')
# tfidftr = vectorizertr.fit_transform(corpustr).todense()
# clss = vectorizertr.fit_transform(corr).todense()
# nb = BernoulliNB()
# nb = nb.fit(tfidftr,clss)
# scores = cross_validation.cross_val_score(nb, tfidftr, clss, cv=5, scoring='accuracy')
# print("Accuracy BernoulliNB: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
submission = pd.DataFrame({ 'cuisine': predictions,
'id': test_df['id'] })
submission.to_csv("submissionX14Dec.csv", index=False)
| apache-2.0 |
glorizen/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 32 | 6059 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
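# Illustrative usage sketch (assumed caller code, not part of this module):
#
#   output = NuPICFileOutput(["rec-center-hourly"])
#   output.write([timestamp], [actual_kw], [predicted_kw], predictionStep=1)
#   ...
#   output.close()
#
# NuPICPlotOutput accepts the same calls, so either implementation can be
# swapped in by the caller.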
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| agpl-3.0 |
IssamLaradji/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 17 | 2021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example consists in fitting a Gaussian Process model onto the diabetes
dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic squared exponential correlation model with a
constant regression model is assumed. We also use a nugget = 1e-2 in order to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
lbishal/scikit-learn | sklearn/datasets/tests/test_base.py | 33 | 6143 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
JeffGoldblum/uwaterloo-igem-2015 | models/tridimensional/data_kleinstiver/cas9_mutants_stats.py | 8 | 9578 | """
Gathering statistics on the Cas9 mutants that were able to bind alternative PAMs
TODO:
- mutations which always co-occur (regionally? or by AA class?)
- mutations which co-occur between NGA and NGC PAMs
- mutations which differ between NGA and NGC PAMs
"""
import operator
import numpy as np
import matplotlib.pyplot as plt
from cas9_mutants import *
from aa_info import *
def find_mutation_counts(pam):
"""
Number of mutations that compose each mutant Cas9, separated by PAM
Returns a vector of the same length as the number of records in mutants_kleinstiver that have 'pam' = pam
"""
mutation_counts = []
for mutant in mutants_kleinstiver:
if mutant['pam'] == pam:
mutation_counts.append(len(mutant['mutations']))
return mutation_counts
def find_idx_counts(pam, start=1099, end=1368):
"""
Cumulative counts of frequency at which amino acid indices are mutated, separated by PAM
    Returns a dict w/ one key per index from start to end (inclusive) and values = # of times that index was mutated for 'pam' = pam
"""
idx_counts = {key: 0 for key in range(start, end+1)}
for mutant in mutants_kleinstiver:
if mutant['pam'] == pam:
for mutation in mutant['mutations']:
if start <= mutation['aa_idx'] <= end:
idx_counts[mutation['aa_idx']] += 1
return idx_counts
def find_ss_counts(pam):
"""
Cumulative counts of which secondary structures are mutated across mutant Cas9s, separated by PAM
Returns a dict w/ the same keys as PI_sec_structure and values = # of times it was mutated for 'pam' = pam
"""
ss_counts = {key: 0 for key in PI_sec_structure}
for mutant in mutants_kleinstiver:
if mutant['pam'] == pam:
for mutation in mutant['mutations']:
ss_counts[mutation['sec_structure']] += 1
return ss_counts
def find_aa_changes(pam, change_type, by_idx=True, start=1099, end=1368):
"""
Interfaces with the aa_info dictionary to classify the amino acid changes across mutant Cas9s, separated by PAM
    If by_idx is True, returns a dict keyed by each combination of index and aa_info change; otherwise returns
    a dict keyed by every aa_info change observed
"""
aa_changes = {}
    assert change_type in aa_info.keys(), "change_type is not in the set of aa_info keys: %s" % change_type
for mutant in mutants_kleinstiver:
if mutant['pam'] == pam:
for mutation in mutant['mutations']:
                aa_info_init = aa_info[change_type][mutation['aa_init']]  # info corresponding to WT amino acid
                aa_info_mut = aa_info[change_type][mutation['aa_mut']]  # info corresponding to mutated amino acid
# key for aa_changes dictionary determined with by_idx setting
change_key = aa_info_init + 'to' + aa_info_mut
if by_idx: change_key = str(mutation['aa_idx']) + '_' + change_key
if change_key in aa_changes:
aa_changes[change_key] += 1
else:
aa_changes[change_key] = 1
return aa_changes
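# Sketch of the structure find_aa_changes returns (the category labels below
# are hypothetical; they depend on the contents of the aa_info dictionary):
#   find_aa_changes('NGA', 'size', by_idx=True)  -> {'1135_smalltolarge': 3, ...}
#   find_aa_changes('NGA', 'size', by_idx=False) -> {'smalltolarge': 7, ...}
# i.e. by_idx=False pools the counts over all amino acid positions.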
def find_num_pcr_needed():
"""
Estimate the number of PCR needed to generate each Cas9 mutant from WT Cas9, assuming that WT Cas9 is mutated via
PCR reactions that can alter 60 nt at a time (or 20 AA).
Returns: list of ints of the same length as mutants_kleinstiver
"""
num_pcr_per_mutant = []
for mutant in mutants_kleinstiver:
idx_to_pcr_mutate = [m['aa_idx'] for m in mutant['mutations']] # Get indices of all mutations in this mutant
num_pcr = 0
# Step inwards from min/max amino acid indices by 60 nt (1 PCR on either side) and remove all indices covered
while idx_to_pcr_mutate:
if max(idx_to_pcr_mutate) - min(idx_to_pcr_mutate) > 20: # 2 or more PCR needed
right_coverage = min(idx_to_pcr_mutate) + 20
left_coverage = max(idx_to_pcr_mutate) - 20
idx_to_pcr_mutate = [idx for idx in idx_to_pcr_mutate if right_coverage <= idx <= left_coverage]
num_pcr += 2
else:
num_pcr += 1
idx_to_pcr_mutate = [] # if the max/min are less than 20 AA apart, only 1 more PCR needed, so end
num_pcr_per_mutant.append(num_pcr)
return num_pcr_per_mutant
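# Worked example of the covering logic above (indices are made up): for
# mutations at positions [1100, 1110, 1150], max - min = 50 > 20, so one PCR
# covers 1100-1120 and another covers 1130-1150; no index is left in between,
# giving num_pcr = 2. For [1100, 1105] a single PCR suffices.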
def hist_mutation_counts(counts_NGA, counts_NGC):
"""
    Plots a single figure containing two histograms showing the number of mutations per NGA- or NGC-binding mutant Cas9
"""
# Plot histogram of mutations per mutant Cas9
f, axarr = plt.subplots(2, sharex=True)
axarr[0].hist(counts_NGA, bins=range(2, 12), histtype='stepfilled', normed=True, align='left', color='#71cce6',
label='NGA')
axarr[0].set_title('Mutations per Successful Cas9 Mutant')
axarr[0].set_ylabel('Probability')
axarr[0].legend()
axarr[1].hist(counts_NGC, bins=range(2, 12), histtype='stepfilled', normed=True, align='left', color='#71cce6',
label='NGC')
axarr[1].set_xlabel('Number of Mutations')
axarr[1].set_ylabel('Probability')
axarr[1].legend()
plt.xlim( 1.5, 10.5 )
plt.show()
def bar_graph_dict(dict_to_plot, plot_title="", xlab="", ylab="", log_scale=False, col="#71cce6", sort_key_list=None,
min_count=1):
"""
Plots a bar graph of the provided dictionary.
Params:
dict_to_plot (dict): should have the format {'label': count}
plot_title (str), xlab (str), ylab (str), log_scale (bool): fairly self-explanatory plot customization
col (str): colour for the bars
        sort_key_list (list): if given, the dictionary keys are reordered to follow the order of this list
min_count (int): do not plot items with less than this in the count
"""
# Sort dictionary & convert to list using custom keys if needed
if not sort_key_list:
list_to_plot = sorted(dict_to_plot.items())
else:
list_to_plot = sorted(dict_to_plot.items(), key=lambda x: sort_key_list.index(x[0]))
# Remove list items with less than min_count
if min_count != 1:
list_to_plot = [dd for dd in list_to_plot if dd[1] >= min_count]
    # Bar plot of the (filtered, sorted) dictionary counts
bar_width = 0.45
plt.bar(np.arange(len(list_to_plot)), [dd[1] for dd in list_to_plot], width=bar_width, align='center', color=col)
plt.xticks(range(len(list_to_plot)), [dd[0] for dd in list_to_plot], rotation=45, ha='right')
plt.title(plot_title)
plt.xlabel(xlab)
plt.ylabel(ylab)
if log_scale:
plt.yscale('log')
plt.ylim(min_count-0.1) # Show values with just the minimum count
plt.show()
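# Minimal usage sketch for bar_graph_dict (values are made up):
#   bar_graph_dict({'alpha_helix': 12, 'beta_sheet': 3, 'loop': 1},
#                  plot_title="Example", ylab="Count", min_count=2)
# would drop 'loop' (count < min_count) and plot the remaining two bars.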
def main():
# Vectors with one entry per mutant Cas9 containing the number of aa mutations in the mutant, separated by PAM
mutation_counts_NGA = find_mutation_counts('NGA')
mutation_counts_NGC = find_mutation_counts('NGC')
# Dictionaries with one key per secondary structure in the PI domain & values that count the number of times the
# secondary structure was mutated across all mutant Cas9s
ss_counts_NGA = find_ss_counts('NGA')
ss_counts_NGC = find_ss_counts('NGC')
    # Dictionaries with one key per amino acid index in the PI domain (1097-1364, the range we exported distances
    # for in PyRosetta) & values that count the number of times the index was mutated across all mutant Cas9s
idx_counts_NGA = find_idx_counts('NGA', 1097, 1364)
idx_counts_NGC = find_idx_counts('NGC', 1097, 1364)
# Plot two histograms & two bar charts
hist_mutation_counts(mutation_counts_NGA, mutation_counts_NGC)
# Plot bar charts of secondary structure, sorted by secondary structure index
PI_sec_sorted = sorted(PI_sec_structure.items(), key=operator.itemgetter(1))
PI_sec_sorted = [ss[0] for ss in PI_sec_sorted]
bar_graph_dict(ss_counts_NGA, log_scale=True, sort_key_list=PI_sec_sorted, ylab="Number of Mutations",
plot_title="Mutations Across Secondary Structures for NGA Cas9")
bar_graph_dict(ss_counts_NGC, log_scale=True, sort_key_list=PI_sec_sorted, ylab="Number of Mutations",
plot_title="Mutations Across Secondary Structures for NGC Cas9")
# Estimate number of PCR reactions needed to convert WT Cas9 into each Cas9 mutant
num_pcr_needed = find_num_pcr_needed()
print num_pcr_needed
    # Bar graphs of amino acid size-category changes
    size_transitions_NGA = find_aa_changes('NGA', 'size', by_idx=False)
    bar_graph_dict(size_transitions_NGA, log_scale=True, ylab="Number of Mutations",
                   plot_title="AA Size Transitions for NGA Cas9", min_count=2)
    size_transitions_NGC = find_aa_changes('NGC', 'size', by_idx=False)
    bar_graph_dict(size_transitions_NGC, log_scale=True, ylab="Number of Mutations",
                   plot_title="AA Size Transitions for NGC Cas9", min_count=2)
# Look at direct amino acid transitions
aa_transitions_NGA = find_aa_changes('NGA', 'abbreviation')
bar_graph_dict(aa_transitions_NGA, log_scale=True, ylab="Number of Mutations",
plot_title="AA Transitions by Index for NGA Cas9", min_count=2)
aa_transitions_NGC = find_aa_changes('NGC', 'abbreviation')
bar_graph_dict(aa_transitions_NGC, log_scale=True, ylab="Number of Mutations",
plot_title="AA Transitions by Index for NGC Cas9", min_count=2)
if __name__ == '__main__':
main()
| mit |
henrynj/PMLMC | plot/euler_old/plot_fixed_eps.py | 2 | 8887 | #!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
def read_variance():
"""read results file from """
level = []
var_q = []
var_yl = []
filename = 'mlmc_convergence_test'
fh = open(filename, 'r')
for line in fh:
# Recognise convergence test lines from
# the fact that line[1] is an integer
if line[0] == ' ' and '0' <= line[1] <= '9':
splitline = [float(x) for x in line.split()]
level.append(splitline[0])
var_q.append(splitline[5])
var_yl.append(splitline[7])
fh.close()
var_q = np.array(var_q)
var_yl = np.array(var_yl)
return var_q, var_yl
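# The parser above assumes 'mlmc_convergence_test' lists one whitespace-separated
# row per level, with column 0 holding the level, column 5 var(Q_l) and column 7
# var(Y_l), presumably the variance of the level correction Q_l - Q_{l-1}.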
def calculate_optimal_number(level, var, eps, gamma = 1.4):
""" calculate optimal number of samples on each level,
based on eq.(3.1), Multilevel Monte Carlo methods, Giles, 2015.
"""
### read var_l from mlmc_convergence_test
cost = np.array( [4**(l*gamma) for l in level] )
NM_opt = np.ceil( 2 * np.sqrt( var / cost ) * \
sum( np.sqrt( var * cost ) ) / ( eps**2) )
return NM_opt.astype(int)
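# The expression above is the standard MLMC sample allocation (Giles 2015,
# eq. 3.1): with per-sample cost C_l = 4**(gamma*l),
#   N_l = ceil( (2/eps**2) * sqrt(V_l/C_l) * sum_k sqrt(V_k*C_k) ).
# Illustrative call (variances are made up):
#   calculate_optimal_number([0, 1], np.array([1e-2, 1e-3]), eps=5e-2)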
def compute_mc_cost(v, eps, l, gamma):
"""
compute cost of MC for given eps
"""
nm = int( np.ceil(2*v/(eps**2)) )
cost = nm * 4**(l*gamma)
return cost
def compute_mlmc_cost(nm, gamma, l0):
cost_ml = 0
for l, n in enumerate(nm):
if l==0:
cost_ml += n * 4**( gamma*(l+l0) )
else:
cost_ml += n * 4**( gamma*(l+l0) ) #* ( 1+4**(-gamma) )
return cost_ml
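# compute_mlmc_cost simply sums N_l * 4**(gamma*(l+l0)) over the levels.
# For example (illustrative numbers), nm=[100, 10] with gamma=1.4 and l0=0
# gives 100*1 + 10*4**1.4, i.e. roughly 170 cost units.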
def estimate_c(L=3):
""" calculate the eps that level L mesh grid can reach"""
del3 = []
l = []
qoi_name = 'FlowAngleOut'
filename = '../TestCase/su2_ls89/mc_data_%s.dat' %qoi_name
fh = open(filename, 'r')
for line in fh:
if line[0] == ' ' and '0' <= line[1] <= '9':
splitline = [float(x) for x in line.split()]
l.append(splitline[0])
del3.append(splitline[3])
A = l[1:]
B = np.log2( np.abs(del3[1:]) )
pa = np.polyfit(A, B, 1)
alpha = -pa[0] / 2
c = 2**pa[1]
eps = c * 2**(-2*L*alpha)
print eps
return eps
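# estimate_c fits |del3| ~ c * 2**(-2*alpha*l) on levels 1..L and returns the
# extrapolated value at level L, eps = c * 2**(-2*L*alpha). Interpreting
# column 3 of the mc_data file as the level differences (weak-error / bias
# proxy) is an assumption based on the variable name 'del3'.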
### plot total cost
def plot_cost(eps = 5e-2, gamma = 1.4):
v_q, v_yl = read_variance()
cost_mc = []
for l, v in enumerate(v_q):
cost_mc.append(compute_mc_cost(v, eps, l, gamma))
cost_ml1 = []
cost_ml2 = []
cost_ml3 = []
# 2 levels
for l0 in range(0,3,1):
level = [l0, l0+1]
nm = calculate_optimal_number(level, v_yl[l0:l0+2], eps, gamma)
cost_ml1.append( compute_mlmc_cost(nm, gamma, l0) )
# 3 levels
for l0 in range(0,2,1):
level = [l0, l0+1, l0+2]
nm = calculate_optimal_number(level, v_yl[l0:l0+3], eps, gamma)
cost_ml2.append( compute_mlmc_cost(nm, gamma, l0) )
# 4 levels
for l0 in range(0,1,1):
level = [l0, l0+1, l0+2, l0+3]
nm = calculate_optimal_number(level, v_yl[l0:l0+4], eps, gamma)
cost_ml3.append( compute_mlmc_cost(nm, gamma, l0) )
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.semilogy(range(4), cost_mc, styles[0], label='MC')
plt.semilogy(range(1,4,1), cost_ml1, styles[1], label='MLMC: 2 levels')
plt.semilogy(range(2,4,1), cost_ml2, styles[2], label='MLMC: 3 levels')
plt.semilogy(range(3,4,1), cost_ml3, styles[3], label='MLMC: 4 levels')
plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel('Standardised Cost')
plt.legend(loc='upper left', frameon=True)
plt.title('$eps=%.2f$' %eps)
plt.savefig('cost_for_eps_%.2f.pdf' %eps)
plt.close()
### of no meaning
def plot_cost_vs_eps(gamma = 1.4):
epss = [1e-2,2e-2,5e-2]
v_q, v_yl = read_variance()
cost_mc = []
cost_ml = []
level = [0,1,2,3]
for eps in epss:
cost_mc.append( compute_mc_cost(v_q[-1], eps, 3, gamma)*eps**2 )
nm = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
cost_ml.append( compute_mlmc_cost(nm, gamma, 0)*eps**2 )
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.loglog(epss, cost_mc, styles[0], label='MC')
plt.loglog(epss, cost_ml, styles[1], label='MLMC')
#plt.xlim(-1, 3+1)
plt.xlabel('accuracy $\epsilon$')
plt.ylabel('Standardised Cost')
plt.legend(loc='upper left', frameon=True)
#plt.title('$eps=%.2f$' %eps)
plt.savefig('cost_vs_eps.pdf')
plt.close()
### plot number of samples per level
def plot_nm(eps = 2e-2, gamma = 1.4):
v_q, v_yl = read_variance()
nm_mc = int( np.ceil(2*v_q[-1]/(eps**2)) )
level = [2,3]
nm_ml2 = calculate_optimal_number(level, v_yl[2:4], eps, gamma)
level = [1,2,3]
nm_ml3 = calculate_optimal_number(level, v_yl[1:4], eps, gamma)
level = [0,1,2,3]
nm_ml4 = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
### plotting
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.semilogy(3, nm_mc, styles[0], label='MC')
plt.semilogy(range(2,4,1), nm_ml2, styles[1], label='MLMC: 2 levels')
plt.semilogy(range(1,4,1), nm_ml3, styles[2], label='MLMC: 3 levels')
plt.semilogy(range(0,4,1), nm_ml4, styles[3], label='MLMC: 4 levels')
plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel(r'$N_l$')
plt.legend(loc='upper right', frameon=True)
plt.title('$eps=%.2f$' %eps)
plt.savefig('nm_for_eps_%.2f.pdf' %eps)
plt.close()
print nm_ml2, nm_ml3, nm_ml4
def plot_nm_vs_eps(gamma=1.4):
v_q, v_yl = read_variance()
#nm_mc = int( np.ceil(2*v_q[-1]/(eps**2)) )
epss = [1e-2,2e-2,5e-2]
level = [0,1,2,3]
nm_ml2 = calculate_optimal_number(level, v_yl[0:4], epss[0], gamma)
nm_ml3 = calculate_optimal_number(level, v_yl[0:4], epss[1], gamma)
nm_ml4 = calculate_optimal_number(level, v_yl[0:4], epss[2], gamma)
### plotting
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
#plt.semilogy(3, nm_mc, styles[0], label='MC')
plt.semilogy(range(0,4,1), nm_ml2, styles[0], label='MLMC: eps=%.2f' %epss[0])
plt.semilogy(range(0,4,1), nm_ml3, styles[1], label='MLMC: eps=%.2f' %epss[1])
plt.semilogy(range(0,4,1), nm_ml4, styles[2], label='MLMC: eps=%.2f' %epss[2])
#plt.xlim(-1, 3+1)
plt.xlabel('level $l$')
plt.ylabel(r'$N_l$')
plt.legend(loc='upper right', frameon=True)
#plt.title('$eps=%.2f$' %eps)
plt.savefig('nm_vs_eps.pdf')
plt.close()
def plot():
epss = [1e-1, 5e-2, 2e-2, 1e-2]#, 2e-3]
gamma = 1.4
v_q, v_yl = read_variance()
cost_mc = []
cost_ml1 = []
cost_ml2 = []
cost_ml3 = []
for eps in epss:
cost_mc.append( compute_mc_cost( v_q[-1], eps, 3, gamma ) )
level = [2,3]
nm = calculate_optimal_number(level, v_yl[2:4], eps, gamma)
cost_ml1.append( compute_mlmc_cost(nm, gamma, 2) )
level = [1,2,3]
nm = calculate_optimal_number(level, v_yl[1:4], eps, gamma)
cost_ml2.append( compute_mlmc_cost(nm, gamma, 1) )
level = [0,1,2,3]
nm = calculate_optimal_number(level, v_yl[0:4], eps, gamma)
cost_ml3.append( compute_mlmc_cost(nm, gamma, 0) )
styles = ['o--', 'x--', 'd--', '*--', 's--']
plt.figure(figsize=(10, 8))
plt.subplot(1, 1, 1)
plt.loglog(cost_mc, epss, styles[0], label='MC')
plt.loglog(cost_ml1, epss, styles[1], label='MLMC: 2 levels')
plt.loglog(cost_ml2, epss, styles[2], label='MLMC: 3 levels')
plt.loglog(cost_ml3, epss, styles[3], label='MLMC: 4 levels')
saving = np.zeros(3)
saving[0] = cost_mc[2] / cost_ml1[2]
saving[1] = cost_mc[2] / cost_ml2[2]
saving[2] = cost_mc[2] / cost_ml3[2]
print saving
eps = estimate_c()
plt.axhline(y=eps, linewidth=.05, color='k')
eps1 = estimate_c(L=2)
plt.axhline(y=eps1, linewidth=.05, color='r')
#plt.ylim(2e-3, 2e-1)
plt.xlabel('Standardised Cost')
plt.ylabel('Standard deviation of estimator')
plt.legend(loc='upper right', frameon=True)
plt.savefig('eps_vs_cost.pdf')
plt.close()
if __name__ == '__main__':
eps = 5e-2; gamma = 1.4
#plot_cost()
#plot_cost_vs_eps()
plot_nm()
#plot_nm_vs_eps() | gpl-3.0 |
nesterione/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer's DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
wchan/tensorflow | tensorflow/examples/skflow/iris_val_based_early_stopping.py | 2 | 2221 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import skflow
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200,
n_classes=3)
# classifier with early stopping on training data
classifier1 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier1.fit(X_train, y_train)
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
122689305/BigDL | pyspark/bigdl/optim/optimizer.py | 2 | 21025 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from distutils.dir_util import mkpath
from bigdl.util.common import DOUBLEMAX
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_spark_context
if sys.version >= '3':
long = int
unicode = str
class MaxIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxIteration is a trigger that triggers an action when training reaches
the number of iterations specified by "max".
Usually used as end_trigger when creating an Optimizer.
>>> maxIteration = MaxIteration(20)
creating: createMaxIteration
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxIteration trigger.
:param max: max
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MaxEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxEpoch is a trigger that triggers an action when training reaches
the number of epochs specified by "max_epoch".
Usually used as end_trigger when creating an Optimizer.
>>> maxEpoch = MaxEpoch(2)
creating: createMaxEpoch
"""
def __init__(self, max_epoch, bigdl_type="float"):
"""
Create a MaxEpoch trigger.
:param max_epoch: max_epoch
"""
JavaValue.__init__(self, None, bigdl_type, max_epoch)
class EveryEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
    EveryEpoch is a trigger that triggers an action when each epoch finishes.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> everyEpoch = EveryEpoch()
creating: createEveryEpoch
"""
def __init__(self, bigdl_type="float"):
"""
Create a EveryEpoch trigger.
"""
JavaValue.__init__(self, None, bigdl_type)
class SeveralIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
SeveralIteration is a trigger that triggers an action every "n"
iterations.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> serveralIteration = SeveralIteration(2)
creating: createSeveralIteration
"""
def __init__(self, interval, bigdl_type="float"):
"""
Create a SeveralIteration trigger.
        :param interval: interval is the "n" such that an action is triggered every "n" iterations
"""
JavaValue.__init__(self, None, bigdl_type, interval)
class Poly(JavaValue):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, reaching zero at max_iteration.
Calculation: base_lr * (1 - iter/max_iteration) ^ power
:param power:
:param max_iteration:
>>> poly = Poly(0.5, 2)
creating: createPoly
"""
def __init__(self, power, max_iteration, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_iteration)
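# Illustrative sketch (commented out, not part of the BigDL API): how the Poly
# policy above scales a base learning rate. The base_lr, power and
# max_iteration values are placeholders chosen for this example.
#
# base_lr, power, max_iteration = 0.1, 0.5, 100
# for it in (0, 50, 100):
#     effective_lr = base_lr * (1.0 - float(it) / max_iteration) ** power
#     # it=0 -> 0.1, it=50 -> ~0.0707, it=100 -> 0.0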
class Step(JavaValue):
"""
A learning rate decay policy, where the effective learning rate is
calculated as base_lr * gamma ^ (floor(iter / step_size))
:param step_size:
:param gamma:
>>> step = Step(2, 0.3)
creating: createStep
"""
def __init__(self, step_size, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_size, gamma)
class Default(JavaValue):
"""
The default learning rate schedule. The effective learning rate decays
with the iteration count according to the optim method's learningrate_decay.
>>> step = Default()
creating: createDefault
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class SGD(JavaValue):
"""
A plain implementation of SGD
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
:param momentum momentum
:param dampening dampening for momentum
:param nesterov enables Nesterov momentum
:param leaningrate_schedule learning rate schedule, e.g. Poly or Step
:param learningrates 1D tensor of individual learning rates
:param weightdecays 1D tensor of individual weight decays
>>> sgd = SGD()
creating: createDefault
creating: createSGD
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
momentum=0.0,
dampening=DOUBLEMAX,
nesterov=False,
leaningrate_schedule=None,
learningrates=None,
weightdecays=None,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, weightdecay,
momentum, dampening, nesterov,
leaningrate_schedule if (leaningrate_schedule) else Default(),
JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays))
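# Minimal usage sketch (commented out; assumes the BigDL engine has been
# initialized). It only shows how an SGD optim method can be combined with one
# of the learning rate schedules defined above; the values are placeholders.
#
# optim_method = SGD(learningrate=0.01,
#                    momentum=0.9,
#                    leaningrate_schedule=Poly(0.5, 10000))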
class Adagrad(JavaValue):
"""
An implementation of Adagrad. See the original paper:
http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
>>> adagrad = Adagrad()
creating: createAdagrad
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, weightdecay)
class LBFGS(JavaValue):
"""
This implementation of L-BFGS relies on a user-provided line
search function (state.lineSearch). If this function is not
provided, then a simple learningRate is used to produce fixed
size steps. Fixed size steps are much less costly than line
searches, and can be useful for stochastic problems.
The learning rate is used even when a line search is provided.
This is also useful for large-scale stochastic problems, where
opfunc is a noisy approximation of f(x). In that case, the learning
rate allows a reduction of confidence in the step size.
:param max_iter Maximum number of iterations allowed
:param max_eval Maximum number of function evaluations
:param tolfun Termination tolerance on the first-order optimality
:param tolx Termination tol on progress in terms of func/param changes
:param ncorrection number of corrections used to approximate the inverse Hessian
:param learningrate fixed step size used when no line search is provided
:param verbose whether to print verbose information during optimization
:param linesearch A line search function
:param linesearch_options If no line search provided, then a fixed step size is used
>>> lbfgs = LBFGS()
creating: createLBFGS
"""
def __init__(self,
max_iter=20,
max_eval=DOUBLEMAX,
tolfun=1e-5,
tolx=1e-9,
ncorrection=100,
learningrate=1.0,
verbose=False,
linesearch=None,
linesearch_options=None,
bigdl_type="float"):
if linesearch or linesearch_options:
raise ValueError('linesearch and linesearch_options must be None in LBFGS')
JavaValue.__init__(self, None, bigdl_type, max_iter, max_eval, tolfun, tolx,
ncorrection, learningrate, verbose, linesearch, linesearch_options)
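# Minimal usage sketch (commented out): LBFGS with fixed-size steps, since this
# binding does not accept a line search function. The values are placeholders.
#
# optim_method = LBFGS(max_iter=20, learningrate=0.5)
# # Passing linesearch or linesearch_options raises ValueError here.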
class Adadelta(JavaValue):
"""
Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701
:param decayrate interpolation parameter rho
:param epsilon for numerical stability
>>> adadelta = Adadelta()
creating: createAdadelta
"""
def __init__(self,
decayrate = 0.9,
epsilon = 1e-10,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, decayrate, epsilon)
class Adam(JavaValue):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adam = Adam()
creating: createAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon)
class Adamax(JavaValue):
"""
An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adamax = Adamax()
creating: createAdamax
"""
def __init__(self,
learningrate = 0.002,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-38,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, beta1, beta2, epsilon)
class RMSprop(JavaValue):
"""
An implementation of RMSprop
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param decayrate decay rate, also called rho
:param epsilon for numerical stability
>>> rmsprop = RMSprop()
creating: createRMSprop
"""
def __init__(self,
learningrate = 1e-2,
learningrate_decay = 0.0,
decayrate = 0.99,
epsilon = 1e-8,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, decayrate, epsilon)
class MultiStep(JavaValue):
"""
Similar to Step, but allows non-uniform steps defined by step_sizes.
:param step_sizes: the series of step sizes used for lr decay
:param gamma: coefficient of decay
>>> step = MultiStep([2, 5], 0.3)
creating: createMultiStep
"""
def __init__(self, step_sizes, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_sizes, gamma)
class Optimizer(JavaValue):
"""
An optimizer is, in general, used to minimize a function with respect
to a set of parameters. In case of training a neural network,
an optimizer tries to minimize the loss of the neural net with
respect to its weights/biases, over the training set.
"""
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create an optimizer.
:param model: the neural net model
:param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
JavaValue.__init__(self, None, bigdl_type, model.value,
training_rdd, criterion,
optim_method if optim_method else SGD(), end_trigger, batch_size)
def set_validation(self, batch_size, val_rdd, trigger, val_method=["Top1Accuracy"]):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, val_rdd, val_method)
def set_model(self, model):
"""
Set model.
:param model: new model
"""
self.value.setModel(model.value)
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
# return a module
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(get_spark_context(), self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel)
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
def prepare_input(self):
"""
Load input. Notebook users can call this method to separate the data
loading step from optimizer creation.
"""
print("Loading input ...")
self.value.prepareInput()
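# End-to-end usage sketch (commented out). It assumes `model`, `criterion`,
# `train_rdd` and `val_rdd` already exist and that the BigDL engine has been
# initialized; all paths and hyperparameters below are placeholders.
#
# optimizer = Optimizer(model=model,
#                       training_rdd=train_rdd,
#                       criterion=criterion,
#                       optim_method=SGD(learningrate=0.01),
#                       end_trigger=MaxEpoch(5),
#                       batch_size=128)
# optimizer.set_validation(batch_size=128, val_rdd=val_rdd,
#                          trigger=EveryEpoch(),
#                          val_method=["Top1Accuracy"])
# optimizer.set_checkpoint(EveryEpoch(), "/tmp/bigdl_checkpoints")
# trained_model = optimizer.optimize()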
class TrainSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
learning rate, training loss, throughput, etc.) change with iterations/time
in an optimization process. TrainSummary is for training indicators only
(check ValidationSummary for validation indicators). It contains necessary
information for the optimizer to know where to store the logs, how to
retrieve the logs, and so on. - The logs are written in tensorflow-compatible
format so that they can be visualized directly using tensorboard. Also the
logs can be retrieved as ndarrays and visualized using python libraries
such as matplotlib (in notebook, etc.).
Use optimizer.setTrainSummary to enable train logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a TrainSummary. Logs will be saved to log_dir/app_name/train.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve train logs by type. Return an array of records in the format
(step,value,wallClockTime). - "Step" is the iteration count by default.
:param tag: the type of the logs. Supported tags are: "LearningRate", "Loss", "Throughput"
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
:param name: tag name. Supported tag names are "LearningRate", "Loss", "Throughput", "Parameters". "Parameters" is an umbrella tag that includes weight, bias, gradWeight, gradBias, and some running status (e.g. runningMean and runningVar in BatchNormalization). If you didn't set any triggers, we will by default record Loss and Throughput in each iteration, while *NOT* recording LearningRate and Parameters, as recording parameters may introduce substantial overhead when the model is very big, and LearningRate is not a public attribute for all OptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
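# Usage sketch for TrainSummary (commented out; the log directory, app name and
# `optimizer` variable are placeholders). Scalars read back as
# (step, value, wallClockTime) records can be plotted with e.g. matplotlib.
#
# train_summary = TrainSummary(log_dir="/tmp/bigdl_logs", app_name="demo")
# train_summary.set_summary_trigger("LearningRate", SeveralIteration(10))
# optimizer.set_train_summary(train_summary)
# loss_records = train_summary.read_scalar("Loss")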
class ValidationSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
validation loss, top1 accuray, top5 accuracy etc.) change with
iterations/time in an optimization process. ValidationSummary is for
validation indicators only (check TrainSummary for train indicators).
It contains necessary information for the optimizer to know where to
store the logs, how to retrieve the logs, and so on. - The logs are
written in tensorflow-compatible format so that they can be visualized
directly using tensorboard. Also the logs can be retrieved as ndarrays
and visualized using python libraries such as matplotlib
(in notebook, etc.).
Use optimizer.setValidationSummary to enable validation logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a ValidationSummary. Logs will be saved under
log_dir/app_name. By default, all ValidationMethod set into
optimizer will be recorded and the recording interval is the same
as trigger of ValidationMethod in the optimizer.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve validation logs by type. Return an array of records in the
format (step,value,wallClockTime). - "Step" is the iteration count
by default.
:param tag: the type of the logs. The tag should match the name of the ValidationMethod set into the optimizer, e.g. "Top1AccuracyLoss", "Top1Accuracy" or "Top5Accuracy".
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
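# Companion sketch for ValidationSummary (commented out; placeholders as above).
# The tag passed to read_scalar must match a ValidationMethod configured via
# optimizer.set_validation, e.g. "Top1Accuracy".
#
# val_summary = ValidationSummary(log_dir="/tmp/bigdl_logs", app_name="demo")
# optimizer.set_val_summary(val_summary)
# top1_records = val_summary.read_scalar("Top1Accuracy")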
class L1L2Regularizer(JavaValue):
"""
Apply both L1 and L2 regularization
:param l1 l1 regularization rate
:param l2 l2 regularization rate
"""
def __init__(self, l1, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1, l2)
class L1Regularizer(JavaValue):
"""
Apply L1 regularization
:param l1 l1 regularization rate
"""
def __init__(self, l1, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1)
class L2Regularizer(JavaValue):
"""
Apply L2 regularization
:param l2 l2 regularization rate
"""
def __init__(self, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l2)
def _test():
import doctest
from pyspark import SparkContext
from bigdl.optim import optimizer
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = optimizer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test optimizer",
conf=create_spark_conf())
init_engine()
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
PatrickOReilly/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the LogisticRegression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
wackymaster/QTClock | Libraries/matplotlib/tri/trifinder.py | 8 | 3645 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.tri import Triangulation
import matplotlib._tri as _tri
import numpy as np
class TriFinder(object):
"""
Abstract base class for classes used to find the triangles of a
Triangulation in which (x,y) points lie.
Rather than instantiate an object of a class derived from TriFinder, it is
usually better to use the function
:func:`matplotlib.tri.Triangulation.get_trifinder`.
Derived classes implement __call__(x,y) where x,y are array_like point
coordinates of the same shape.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError('Expected a Triangulation object')
self._triangulation = triangulation
class TrapezoidMapTriFinder(TriFinder):
"""
:class:`~matplotlib.tri.TriFinder` class implemented using the trapezoid
map algorithm from the book "Computational Geometry, Algorithms and
Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
and O. Schwarzkopf.
The triangulation must be valid, i.e. it must not have duplicate points,
triangles formed from colinear points, or overlapping triangles. The
algorithm has some tolerance to triangles formed from colinear points, but
this should not be relied upon.
"""
def __init__(self, triangulation):
TriFinder.__init__(self, triangulation)
self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
triangulation.get_cpp_triangulation())
self._initialize()
def __call__(self, x, y):
"""
Return an array containing the indices of the triangles in which the
specified x,y points lie, or -1 for points that do not lie within a
triangle.
*x*, *y* are array_like x and y coordinates of the same shape and any
number of dimensions.
Returns integer array with the same shape as *x* and *y*.
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if x.shape != y.shape:
raise ValueError("x and y must be array-like with the same shape")
# C++ does the heavy lifting, and expects 1D arrays.
indices = self._cpp_trifinder.find_many(x.ravel(), y.ravel())
indices.shape = x.shape
return indices
def _get_tree_stats(self):
"""
Return a python list containing the statistics about the node tree:
0: number of nodes (tree size)
1: number of unique nodes
2: number of trapezoids (tree leaf nodes)
3: number of unique trapezoids
4: maximum parent count (max number of times a node is repeated in
tree)
5: maximum depth of tree (one more than the maximum number of
comparisons needed to search through the tree)
6: mean of all trapezoid depths (one more than the average number
of comparisons needed to search through the tree)
"""
return self._cpp_trifinder.get_tree_stats()
def _initialize(self):
"""
Initialize the underlying C++ object. Can be called multiple times if,
for example, the triangulation is modified.
"""
self._cpp_trifinder.initialize()
def _print_tree(self):
"""
Print a text representation of the node tree, which is useful for
debugging purposes.
"""
self._cpp_trifinder.print_tree()
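# Usage sketch (commented out): the recommended entry point is
# Triangulation.get_trifinder(), which returns a TrapezoidMapTriFinder. The
# point coordinates below are placeholders.
#
# import numpy as np
# from matplotlib.tri import Triangulation
#
# x = np.array([0.0, 1.0, 0.0, 1.0])
# y = np.array([0.0, 0.0, 1.0, 1.0])
# trifinder = Triangulation(x, y).get_trifinder()
# tri_indices = trifinder([0.25, 2.0], [0.25, 2.0])
# # points outside the triangulation map to -1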
| mit |
hsaputra/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 59 | 13552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testFillArraySmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArraySmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[32, 32], dtype=np.bool).tolist() +
np.ones(shape=[32, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.bool)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.bool).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.bool)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
if __name__ == "__main__":
test.main()
| apache-2.0 |