repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
inhuszar/histreg | edge_rkms-experimental.py | 1 | 12857 | #!/Users/inhuszar/MND_HistReg/MND_HistReg_Python/bin/python
# 2017-Jun-05
# Only single file processing is implemented at the moment.
import numpy as np
import tifffile as tiff
from sklearn.cluster import KMeans
from mpi4py import MPI
import os
import psutil
from args import *
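# Editor's note: the wildcard import above presumably provides the command-line
# helpers used throughout this script (argexist, subarg, and the sys/argv
# handling); the args module itself is not part of this file.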
class JobDescriptorObj:
def __init__(self, img, bgdp, roi_centres, n_features, n_clusters,
frame_height, frame_width, weights):
self.img = img
self.bgdp = bgdp
self.roi_centres = roi_centres
self.n_features = n_features
self.n_clusters = n_clusters
self.frame_height = frame_height
self.frame_width = frame_width
self.weights = weights
# This listing is only provisional.
CLFLAGS = {'subdir': '-r',
'exclude': '--exclude',
'include': '--include',
'mask': '-m',
'dp': '--bgdp',
'clusters': '-c',
'frame': '-f',
'dims': '--dims',
'random': '--random',
'edt': '--edt',
'cpu': '--cpu',
'smap': '--smap',
'mlth': '--mlth',
'probs': '--probs',
'verbose': '-v',
'weight': '-w'}
def shift_image(img, (x, y), inverse=False):
# The image must be 2D
if not inverse:
return np.roll(np.roll(img, y, 0), x, 1)
else:
return np.roll(np.roll(img, -y, 0), -x, 1)
def run_kmeans(job):
fh, fw = job.frame_height, job.frame_width
h, w, ch = job.img.shape
subtotal = job.roi_centres[:,0].size
# Create stack for classification results
# (layers: number of clusters + background)
results = np.zeros((h, w, job.n_clusters + 1))
smap = np.zeros((h, w))
i = 0
err = 0  # counts frames skipped when KMeans raises the IndexError handled below
# Loop through the ROIs
for y, x in job.roi_centres:
# Define frame
top = max(y - fh/2, 0)
bottom = min(y + fh/2, h)
left = max(x - fw / 2, 0)
right = min(x + fw / 2, w)
frame = np.array(job.img[top:bottom, left:right, :])
frame_mask = np.array(job.bgdp[top:bottom, left:right])
# Pool data from the frame by discarding background and dark pixels
data_all = frame.reshape((-1, job.n_features), order='F')
data = data_all[(frame_mask == 0).reshape(-1, order='F'), :]
# Prevent ValueError, when the frame is entirely in the mask area
# Count as detection for mask class
# (added on 5th June, 2017)
if data.size < job.n_clusters:
results[top:bottom, left:right, 0] = \
results[top:bottom, left:right, 0] + 1
continue
data = data - np.mean(data, axis=0)
data = data / np.var(data, axis=0)
# Change invalid values (div by zero) to 0
data[np.isnan(data)] = 0
data[np.isinf(data)] = 0
data[np.isneginf(data)] = 0
# Perform k-Means clustering (prepare for hitting the bug)
try:
# Set np.float64 to avoid a bug in the k-means implementation.
data = data.astype(np.float64)
kmeans = KMeans(n_clusters=job.n_clusters, random_state=1).fit(data)
# Reintroduce zero-pixels and reshape labels array
# The axis swap is a trick to comply with earlier Fortran-reshapes.
labels = np.rollaxis(np.zeros_like(frame_mask), 1, 0)
frame_mask = np.rollaxis(frame_mask, 1, 0)
labels[frame_mask == 0] = 1 + kmeans.labels_ # 0 is for background
labels = np.rollaxis(labels, 1, 0)
# Standardise labels
# 1: dark pixels, 2: GM, 3: WM
# order labels by the first feature values
order = 1 + np.argsort(kmeans.cluster_centers_[:, 0])
if not all(order[i] <= order[i + 1] for i in
xrange(len(order) - 1)):
tmp = np.copy(labels)
for label in range(1, job.n_clusters, 1):
labels[tmp == label] = 1 + np.where(order == label)[0][0]
# If the classifier hits the bug, set all labels to -1,
# so they won't be counted.
except IndexError:
print 'IndexError'
err = err + 1
continue
# Stack classification results
for clust in range(job.n_clusters + 1):
inc = np.zeros_like(labels)
inc[np.where(labels == clust)] = 1
results[top:bottom, left:right, clust] = \
results[top:bottom, left:right, clust] + inc
# Update sampling map (if necessary)
smap[top:bottom, left:right] = smap[top:bottom, left:right] + 1
# Report progress
i = i + 1
print '{}. process: {}/{}'.format(rank, i, subtotal)
return results, smap
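# --- Editor's note: illustrative sketch, not part of the original script. ---
# The trickiest step in run_kmeans is the label standardisation: cluster labels
# are renumbered so that label 1 belongs to the cluster with the smallest first
# feature value, label 2 to the next, and so on. The helper below shows the
# same idea in isolation; its name and arguments are hypothetical. (The original
# loop uses range(1, n_clusters, 1), which appears to leave the highest label
# unmapped; this sketch loops over every label.)
def _example_reorder_labels(labels, cluster_centres):
    # labels: k-means labels shifted by +1, with 0 reserved for background
    # cluster_centres: (n_clusters, n_features) array of KMeans centres
    order = 1 + np.argsort(cluster_centres[:, 0])
    reordered = np.copy(labels)
    for label in range(1, cluster_centres.shape[0] + 1):
        reordered[labels == label] = 1 + np.where(order == label)[0][0]
    return reordered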
def main_proc(comm):
print 'Main process running.'
print 'Number of parallel processes: {}'.format(n_workers)
# Load original image
imfile = ''
if argexist(sys.argv[0], True):
imfile = subarg(sys.argv[0])[0]
# The following two lines are here just for the developer's convenience
if imfile == '':
imfile = '../../edge-rkms-experimental/NP140-14 (97).tif'
if not os.path.isfile(imfile):
print "ERROR: The image could not be loaded from '{}'".format(imfile)
comm.Abort()
exit()
fpath, fname = os.path.split(imfile)
img = tiff.imread(imfile)
h, w, ch = img.shape
# FIXME: this is a crude way of getting rid of the alpha channel
if ch > 3:
img = img[:,:,:3]
ch = 3
# Load the combined background and dark pixels mask
bgdpfile = subarg(CLFLAGS['dp'])[0]
# The following two lines are here just for the developer's convenience
if bgdpfile == '':
bgdpfile = os.path.join(fpath, 'bgdp.tif')
if not os.path.isfile(bgdpfile):
print "ERROR: The background and dark pixel map could not be loaded " \
"from '{}'".format(bgdpfile)
comm.Abort()
exit()
bgdp = tiff.imread(bgdpfile)
# Validate that the two images are compatible:
if bgdp.shape != img.shape[:2]:
print 'The mask and the main images have different shapes.'
comm.Abort()
exit()
if argexist(CLFLAGS['edt'], True):
edtfile = subarg(CLFLAGS['edt'])[0]
if os.path.isfile(edtfile):
edt = tiff.imread(edtfile)
if edt.shape != img.shape[:2]:
print 'EDT file had an incompatible shape.'
comm.Abort()
exit()
else:
img = np.dstack((img, edt[:, :, np.newaxis]))
h, w, ch = img.shape
else:
print 'EDT file could not be opened from {}.'.format(edtfile)
comm.Abort()
exit()
# Find edges and define edge pixels as ROI centres
step = 1
edges = np.zeros_like(bgdp)
for x in step*np.asarray([-1, 0, 1], dtype=np.int8):
for y in step*np.asarray([-1, 0, 1], dtype=np.int8):
if (x, y) != (0, 0):
diff = bgdp - shift_image(bgdp, (x, y))
diff[diff != 0] = 1
edges = np.logical_or(edges, shift_image(diff, (x, y),
inverse=True)).astype(
np.int8)
roi_centres = np.vstack(np.where(edges != 0)).T
total = roi_centres[:, 0].size
if argexist(CLFLAGS['random'], True):
try:
r = int(subarg(CLFLAGS['random'], 1000)[0])
except:
print 'Invalid {} argument.'.format(CLFLAGS['random'])
comm.Abort()
exit()
idx = np.random.randint(0, total, r)
roi_centres = roi_centres[idx, :]
total = roi_centres[:, 0].size
else:
# Reduce the number of ROIs
for x in step*np.asarray([-1, 0, 1], dtype=np.int8):
for y in step*np.asarray([-1, 0, 1], dtype=np.int8):
if (x, y) != (0, 0):
diff = shift_image(edges - shift_image(edges, (x, y)), (x, y),
inverse=True)
edges[diff == 0] = 0
roi_centres = np.vstack(np.where(edges != 0)).T
total = roi_centres[:,0].size
print 'Number of ROIs:', total
# Set constants for frame size and counts of clusters and features
n_features = ch
print 'Features: ', n_features
# FIXME: no exception handling
n_clusters = int(subarg(CLFLAGS['clusters'], 3)[0])
fh, fw = np.array(subarg(CLFLAGS['frame'], "301,301")).astype(np.int32)
weights = np.array(subarg(CLFLAGS['weight'], ','.join(['1' for i in
range(ch)])), dtype=np.float64)
if weights.size != ch:
print 'Weight vector had incompatible size.'
comm.Abort()
exit()
else:
#wsum = np.sum(weights)
weights = weights / np.sum(weights)
# FIXME: I ONLY HOPE THAT THIS IS CORRECT:
img = np.multiply(img, weights[np.newaxis, np.newaxis, :])
# Create JobDescriptorObj and pass it to the parallel processes
print 'Distributing the job among {} workers...'.format(n_workers)
comm.Barrier()
jobs = []
unit = total / n_workers
for worker in range(0, n_workers):
start = worker * unit
end = start + unit
if worker == n_workers - 1:
end = total
jobs.append(JobDescriptorObj(img, bgdp, roi_centres[start:end,:],
n_features, n_clusters, fh, fw, weights))
for worker in range(len(jobs)):
if worker != 0:
comm.send(jobs[worker], dest=worker, tag=11)
n_ROIs = np.array(jobs[0].roi_centres[:, 0].size, 'd')
s = np.zeros_like(n_ROIs)
comm.Reduce([n_ROIs, MPI.DOUBLE], [s, MPI.DOUBLE], op=MPI.SUM, root=0)
if s == total:
print 'Distribution of jobs was successful.'
else:
print 'Some ROIs were lost while distributing jobs.'
comm.Abort()
exit()
# Perform k-means classification in the main process
print 'Main process is working...'
results, smap = run_kmeans(jobs[0])
# Pooling results from all workers
comm.Barrier()
print 'Pooling results from all processes...'
results_all = np.zeros_like(results)
comm.Reduce([results, MPI.DOUBLE], [results_all, MPI.DOUBLE], op=MPI.SUM,
root=0)
comm.Barrier()
smap_all = np.zeros_like(smap)
comm.Reduce([smap, MPI.DOUBLE], [smap_all, MPI.DOUBLE], op=MPI.SUM,
root=0)
# Save results table as numpy array
np.save('clustertable', results_all)
# Generate output
sums = np.sum(results_all, axis=2)
probs = np.asarray(results_all, dtype=np.float32) / sums[:, :, np.newaxis]
segmentation = np.argmax(probs, axis=2)
checksum = np.sum(probs, axis=2)
print "Checksum (=1 =1): {}, {}".format(np.min(checksum), np.max(checksum))
# Save segmentation result
if argexist(CLFLAGS['mlth']):
fn = os.path.join(fpath, fname[:-4]) + '_mlth.tif'
tiff.imsave(fn, segmentation.astype(np.uint8))
print 'SAVED:', fn
# Save probability maps
if argexist(CLFLAGS['probs']):
for clss in range(n_clusters + 1):
fn = os.path.join(fpath, fname[:-4]) + '_class{0:02d}.tif'.format(clss)
tiff.imsave(fn, probs[:, :, clss].astype(np.float32))
print 'SAVED:', fn
# Save sampling map
if argexist(CLFLAGS['smap']):
fn = os.path.join(fpath, fname[:-4]) + '_smap.tif'
tiff.imsave(fn, smap_all.astype(np.uint32))
print 'SAVED:', fn
return 0
def parallel_proc(comm):
print 'Process number {} ready.'.format(rank)
# Wait for image descriptor object to arrive
comm.Barrier()
job = comm.recv(source=0, tag=11)
# Report how many ROIs this process has received
n_ROIs = np.array(job.roi_centres[:,0].size, 'd')
comm.Reduce([n_ROIs, MPI.DOUBLE], None, op=MPI.SUM, root=0)
# Do the job and pool the results individually
print 'Process {} is working...'.format(rank)
results, smap = run_kmeans(job)
# Send the results as a numpy array
comm.Barrier()
comm.Reduce([results, MPI.DOUBLE], None, op=MPI.SUM, root=0)
comm.Barrier()
comm.Reduce([smap, MPI.DOUBLE], None, op=MPI.SUM, root=0)
# Close instance
exit()
# Main program execution starts here
p = psutil.Process(os.getppid())
# FIXME: add exception handling
if str(p.name()).lower() != 'orterun':
n_jobs = int(subarg(CLFLAGS['cpu'], 1)[0])
if n_jobs > 1:
os.system('mpirun -n {} python '.format(n_jobs) + ' '.join(sys.argv))
exit()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
n_workers = comm.Get_size()
if rank == 0:
main_proc(comm)
elif rank != 0:
parallel_proc(comm) | mit |
BhallaLab/moose | moose-examples/snippets/cylinderDiffusion.py | 2 | 6353 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import sys
sys.path.append('../../python')
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
import os
import signal
PID = os.getpid()
def doNothing( *args ):
pass
signal.signal( signal.SIGUSR1, doNothing )
def makeModel():
# create container for model
r0 = 2e-6 # m
r1 = 1e-6 # m
num = 100
diffLength = 1e-6 # m
len = num * diffLength # m
diffConst = 10e-12 # m^2/sec
motorRate = 10e-6 # m/sec
concA = 1 # millimolar
model = moose.Neutral( 'model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.r0 = r0
compartment.r1 = r1
compartment.x0 = 0
compartment.x1 = len
compartment.diffLength = diffLength
assert( compartment.numDiffCompts == num )
# create molecules and reactions
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
d = moose.BufPool( '/model/compartment/d' )
r1 = moose.Reac( '/model/compartment/r1' )
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'sub', d, 'reac' )
moose.connect( r1, 'prd', c, 'reac' )
r1.Kf = 1.0 # 1/(mM.sec)
r1.Kb = 1.0 # 1/sec
# Assign parameters
a.diffConst = diffConst
b.diffConst = diffConst / 2.0
b.motorConst = motorRate
c.diffConst = 0
d.diffConst = diffConst
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/compartment/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
os.kill( PID, signal.SIGUSR1 )
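# Editor's note: sending SIGUSR1 to our own PID is harmless because of the
# doNothing handler registered at the top of the script; the call presumably
# lets an external test harness know the solvers are in place before the
# stoich path is assigned (this purpose is an assumption, not stated here).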
stoich.path = "/model/compartment/##"
print((dsolve.numPools))
assert( dsolve.numPools == 3 )
a.vec[0].concInit = concA
b.vec[0].concInit = concA
c.vec[0].concInit = concA
d.vec.concInit = concA / 5.0
d.vec[num-1].concInit = concA
def makePlots():
plt.ion()
fig = plt.figure( figsize=(12,6) )
dynamic = fig.add_subplot( 111 )
a = moose.vec( '/model/compartment/a' )
b = moose.vec( '/model/compartment/b' )
c = moose.vec( '/model/compartment/c' )
d = moose.vec( '/model/compartment/d' )
pos = numpy.arange( 0, a.conc.size, 1 )
aline, = dynamic.plot( pos, a.conc, label='a' )
bline, = dynamic.plot( pos, b.conc, label='b' )
cline, = dynamic.plot( pos, c.conc, label='c' )
dline, = dynamic.plot( pos, d.conc, label='d' )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Cylinder voxel #' )
plt.legend()
timelabel = plt.text( 10, 0.8, 'time = 0.0' )
fig.canvas.draw()
return( fig, dynamic, timelabel, aline, bline, cline, dline )
def updatePlots( plotlist, time ):
a = moose.vec( '/model/compartment/a' )
b = moose.vec( '/model/compartment/b' )
c = moose.vec( '/model/compartment/c' )
d = moose.vec( '/model/compartment/d' )
plotlist[2].set_text( "time = %g" % time )
plotlist[3].set_ydata( a.conc )
plotlist[4].set_ydata( b.conc )
plotlist[5].set_ydata( c.conc )
plotlist[6].set_ydata( d.conc )
plotlist[0].canvas.draw()
def main():
"""
This example illustrates how to set up a diffusion/transport model with
a simple reaction-diffusion system in a tapering cylinder:
| Molecule **a** diffuses with diffConst of 10e-12 m^2/s.
| Molecule **b** diffuses with diffConst of 5e-12 m^2/s.
| Molecule **b** also undergoes motor transport with a rate of 10e-6 m/s,
| Thus it 'piles up' at the end of the cylinder.
| Molecule **c** does not move: diffConst = 0.0
| Molecule **d** does not move: diffConst = 10.0e-12 but it is buffered.
| Because it is buffered, it is treated as non-diffusing.
All molecules other than **d** start out only in the leftmost (first)
voxel, with a concentration of 1 mM. **d** is present throughout
at 0.2 mM, except in the last voxel, where it is at 1.0 mM.
The cylinder has a starting radius of 2 microns, and end radius of
1 micron. So when the molecule undergoing motor transport gets to the
narrower end, its concentration goes up.
There is a little reaction in all compartments: ``b + d <===> c``
As there is a high concentration of **d** in the last compartment,
when the molecule **b** reaches the end of the cylinder, the reaction
produces lots of **c**.
Note that molecule **a** does not participate in this reaction.
The concentrations of all molecules are displayed in an animation.
"""
runtime = 20.0
diffdt = 0.005
plotdt = 0.1
makeModel()
# Set up clocks. The dsolve needs to know these before the stoich is assigned.
moose.setClock( 10, diffdt ) # 10 is the standard clock for Dsolve.
moose.setClock( 16, plotdt ) # 16 is the standard clock for Ksolve.
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
c = moose.element( '/model/compartment/c' )
d = moose.element( '/model/compartment/d' )
moose.reinit()
atot = sum( a.vec.n )
btot = sum( b.vec.n )
ctot = sum( c.vec.n )
dtot = sum( d.vec.n )
plotlist = makePlots()
for t in numpy.arange( 0, runtime, plotdt ):
moose.start( plotdt )
updatePlots( plotlist, t )
# moose.start( runtime ) # Run the model
atot2 = sum( a.vec.n )
btot2 = sum( b.vec.n )
ctot2 = sum( c.vec.n )
dtot2 = sum( d.vec.n )
print('Ratio of initial to final total numbers of a, b, c, d = ')
print((atot2/atot, btot2/btot, ctot2/ctot, dtot2/dtot))
print(('Initial to final (b+c)=', (btot2 + ctot2) / (btot + ctot )))
print("\nHit '0' to exit")
try:
raw_input( )
except NameError as e: # python3
input( )
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/series/test_internals.py | 17 | 12814 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
from numpy import nan
import numpy as np
from pandas import Series
from pandas.core.indexes.datetimes import Timestamp
import pandas._libs.lib as lib
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
class TestSeriesInternals(object):
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'],
dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=True,
convert_numeric=False)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
Timestamp('20010105')], dtype='M8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
# preserve all-NaNs (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
# preserve if non-object
s = Series([1], dtype='float32')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r.convert_objects(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
# Test coercion returns correct type
s = Series(['a', 'b', 'c'])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 3)
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([np.nan] * 3)
assert_series_equal(results, expected)
expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
results = s._convert(timedelta=True, coerce=True)
assert_series_equal(results, expected)
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
s = Series(['a', '3.1415', dt, td])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
dtype=np.dtype('m8[ns]'))
assert_series_equal(results, expected)
# Test standard conversion returns original
results = s._convert(datetime=True)
assert_series_equal(results, s)
results = s._convert(numeric=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True)
assert_series_equal(results, s)
# test pass-through and non-conversion when other types selected
s = Series(['1.0', '2.0', '3.0'])
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([1.0, 2.0, 3.0])
assert_series_equal(results, expected)
results = s._convert(True, False, True)
assert_series_equal(results, s)
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0,
0)])
assert_series_equal(results, expected)
results = s._convert(datetime=False, numeric=True, timedelta=True)
assert_series_equal(results, s)
td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
s = Series([td, td], dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([td, td])
assert_series_equal(results, expected)
results = s._convert(True, True, False)
assert_series_equal(results, s)
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s._convert(numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
result = r._convert(numeric=True)
expected = s.copy()
expected['a'] = nan
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
result = s._convert(datetime=True, numeric=True)
expected = Series([1, nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s._convert(datetime=True, numeric=True)
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'], dtype='O')
result = s._convert(datetime=True)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'), lib.NaT, lib.NaT, lib.NaT,
Timestamp('20010104'), Timestamp('20010105')],
dtype='M8[ns]')
result = s2._convert(datetime=True, numeric=False, timedelta=False,
coerce=True)
assert_series_equal(result, expected)
result = s2._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
# preserve if non-object
s = Series([1], dtype='float32')
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r._convert(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
expected = Series([lib.NaT])
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series([x.upper()])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
s = Series(['1.0', '2'])
pytest.raises(ValueError, s._convert)
def test_convert_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
| mit |
DJArmstrong/autovet | Features/old/Centroiding/scripts/binning.py | 4 | 10376 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 21:18:27 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: [email protected]
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy import stats #needed for stats.nanmean/median on ngtshead
######################################################################
# BINNING WITHOUT TIME GAPS
# !!! DO NOT USE FOR COMBINING DIFFERENT NIGHTS !!!
######################################################################
def binning1D(arr, bin_width, setting='mean', normalize=False):
""" WARNING: this does not respect boundaries between different night;
will average data from different nights"""
N_time = len(arr)
N_bins = np.int64(np.ceil(1.*N_time / bin_width))
binarr, binarr_err = np.zeros((2,N_bins))
bin_width = int(bin_width)
if setting=='mean':
for nn in range(N_bins):
binarr[nn] = np.nanmean(arr[nn*bin_width:(nn+1)*bin_width])
binarr_err[nn] = np.nanstd(arr[nn*bin_width:(nn+1)*bin_width])
if setting=='median':
for nn in range(N_bins):
binarr[nn] = np.nanmedian(arr[nn*bin_width:(nn+1)*bin_width])
binarr_err[nn] = 1.48 * np.nanmedian(abs(arr[nn*bin_width:(nn+1)*bin_width] - binarr[nn]))
if normalize==True:
med = np.nanmedian(binarr)
binarr /= med
binarr_err /= med
return binarr, binarr_err
def binning2D(arr, bin_width, setting='mean', normalize=False, axis=1):
#arr being 2D array, with objs on x and time stamps on y
""" WARNING: this does not respect boundaries between different night;
will average data from different nights"""
N_time = arr.shape[1]
# print N
N_objs = arr.shape[0]
# print N_objs
N_bins = np.int64(np.ceil(1.*N_time / bin_width))
# print N_bins
binarr, binarr_err = np.zeros((2,N_objs,N_bins))
# print arr.shape
# print binarr.shape
bin_width = int(bin_width)
if setting=='mean':
for nn in range(N_bins):
binarr[:,nn] = np.nanmean(arr[:,nn*bin_width:(nn+1)*bin_width], axis=axis)
binarr_err[:,nn] = np.nanstd(arr[:,nn*bin_width:(nn+1)*bin_width], axis=axis)
if setting=='median':
for nn in range(N_bins):
binarr[:,nn] = np.nanmedian(arr[:,nn*bin_width:(nn+1)*bin_width], axis=axis)
binarr_err[:,nn] = 1.48 * np.nanmedian(abs(arr[:,nn*bin_width:(nn+1)*bin_width] - binarr[:,nn]))
if normalize==True:
med = np.nanmedian(binarr)
binarr /= med
binarr_err /= med
# print arr.shape
# print binarr.shape
return binarr, binarr_err
######################################################################
# BINNING WITH TIME GAPS
# !!! USE THIS FOR COMBINING DIFFERENT NIGHTS !!!
######################################################################
def bin_edge_indices(time1D, bin_width, timegap, N_time):
""" DETERMINE ALL THE BIN-EDGE-INDICES (TO NOT BIN OVER DIFFERENT NIGHTS)"""
""" this currently relies on the fact that timestamps for all are approximately the same
(given for the case of a HJD array that represents MJD values with small corrections)"""
# ind_start_of_night = np.append( 0 , np.where( np.diff(time) > timegap )[0] + 1 )
ind_end_of_night = np.append( np.where( np.diff(time1D) > timegap )[0], len(np.diff(time1D)-1 ) )
N_nights = len(ind_end_of_night)
first_ind = [0]
last_ind = []
i = 0
# j = 0
while ((first_ind[-1] < N_time) & (i < N_nights) ):
if (first_ind[-1]+bin_width) < ind_end_of_night[i]:
last_ind.append( first_ind[-1] + bin_width )
else:
last_ind.append( ind_end_of_night[i] )
i += 1
first_ind.append( last_ind[-1] + 1 )
# j += 1
del first_ind[-1]
return first_ind, last_ind
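# --- Editor's note: illustrative sketch, not part of the original module. ---
# A minimal, hypothetical example of what bin_edge_indices returns. With a
# timegap of 3600 s the bin edges never straddle the jump between two nights,
# so the last bin of a night can be shorter than bin_width.
def _example_bin_edges():
    # two "nights" of six timestamps each, separated by a gap >> 3600 s
    t = np.array([0., 1., 2., 3., 4., 5.,
                  1e5, 1e5 + 1, 1e5 + 2, 1e5 + 3, 1e5 + 4, 1e5 + 5])
    first_ind, last_ind = bin_edge_indices(t, bin_width=4, timegap=3600,
                                           N_time=len(t))
    # expected to be roughly first_ind = [0, 5, 6, 11], last_ind = [4, 5, 10, 11]:
    # binning restarts at index 6, the first point of the second night
    return first_ind, last_ind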
def binning1D_per_night(time, arr, bin_width, timegap=3600, setting='mean', normalize=False):
""" If time and arr are 1D arrays """
N_time = len(arr)
bin_width = int(bin_width)
first_ind, last_ind = bin_edge_indices(time, bin_width, timegap, N_time)
N_bins = len(first_ind)
bintime, binarr, binarr_err = np.zeros((3,N_bins)) * np.nan
if setting=='mean':
for nn in range(N_bins):
#skip no/single data points
if last_ind[nn] > first_ind[nn]:
bintime[nn] = np.nanmean( time[first_ind[nn]:last_ind[nn]] )
#skip All-NAN slices (i.e. where all flux data is masked)
if ( np.isnan(arr[first_ind[nn]:last_ind[nn]]).all() == False ):
binarr[nn] = np.nanmean( arr[first_ind[nn]:last_ind[nn]] )
binarr_err[nn] = np.nanstd( arr[first_ind[nn]:last_ind[nn]] )
elif setting=='median':
for nn in range(N_bins):
#skip no/single data points
if (last_ind[nn] > first_ind[nn]):
bintime[nn] = np.nanmedian( time[first_ind[nn]:last_ind[nn]] )
#skip All-NAN slices (i.e. where all flux data is masked)
if ( np.isnan(arr[first_ind[nn]:last_ind[nn]]).all() == False ):
binarr[nn] = np.nanmedian( arr[first_ind[nn]:last_ind[nn]] )
binarr_err[nn] = 1.48 * np.nanmedian( abs(arr[first_ind[nn]:last_ind[nn]] - binarr[nn]) )
if normalize==True:
med = np.nanmedian(binarr)
binarr /= med
binarr_err /= med
return bintime, binarr, binarr_err
def binning2D_per_night(time, arr, bin_width, timegap=3600, setting='mean', normalize=False, axis=1):
""" If time and arr are each a 2D array, with different objs on x and different time stamps on y"""
""" this currently relies on the fact that timestamps for all are approximately the same
(given for the case of a HJD array that represents MJD values with small corrections)"""
N_time = arr.shape[1]
N_objs = arr.shape[0]
bin_width = int(bin_width)
first_ind, last_ind = bin_edge_indices(time[0,:], bin_width, timegap, N_time)
N_bins = len(first_ind)
bintime, binarr, binarr_err = np.zeros((3,N_objs,N_bins))
if setting=='mean':
for nn in range(N_bins):
bintime[:,nn] = np.nanmean( time[:,first_ind[nn]:last_ind[nn]], axis=axis )
binarr[:,nn] = np.nanmean( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
binarr_err[:,nn] = np.nanstd( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
elif setting=='median':
for nn in range(N_bins):
bintime[:,nn] = np.nanmedian( time[:,first_ind[nn]:last_ind[nn]], axis=axis )
binarr[:,nn] = np.nanmedian( arr[:,first_ind[nn]:last_ind[nn]], axis=axis )
binarr_err[:,nn] = 1.48 * np.nanmedian( abs(arr[:,first_ind[nn]:last_ind[nn]] - binarr[:,nn]) )
if normalize==True:
med = np.nanmedian(binarr)
binarr /= med
binarr_err /= med
return bintime, binarr, binarr_err
def binning1D_per_night_list(time, arr, bin_width, timegap=3600, setting='mean', normalize=False):
""" different style of program, same application """
N = len(time)
bin_width = int(bin_width)
bintime = []
binarr = []
binarr_err = []
# ind_start_of_night = np.append( 0 , np.where( np.diff(time) > timegap )[0] + 1 )
ind_end_of_night = np.append( np.where( np.diff(time) > timegap )[0], len(np.diff(time)-1 ) )
N_nights = len(ind_end_of_night)
first_ind = 0
i = 0
if setting=='mean':
while ((first_ind < N) & (i < N_nights) ):
if (first_ind+bin_width) < ind_end_of_night[i]:
last_ind = first_ind+bin_width
else:
last_ind = ind_end_of_night[i]
i += 1
bintime.append( np.nanmean( time[first_ind:last_ind] ) )
binarr.append( np.nanmean( arr[first_ind:last_ind] ) )
binarr_err.append( np.nanstd(arr[first_ind:last_ind]) )
first_ind = last_ind + 1
elif setting=='median':
while first_ind < N:
if (first_ind+bin_width) < ind_end_of_night[i]:
last_ind = first_ind+bin_width
else:
last_ind = ind_end_of_night[i]
i += 1
bintime.append( np.nanmedian( time[first_ind:last_ind] ) )
binarr.append( np.nanmedian( arr[first_ind:last_ind] ) )
binarr_err.append( 1.48 * np.nanmedian(abs( arr[first_ind:last_ind] - binarr[-1])) )
first_ind = last_ind
bintime = np.array(bintime)
binarr = np.array(binarr)
binarr_err = np.array(binarr_err)
if normalize==True:
med = np.nanmedian(binarr)
binarr /= med
binarr_err /= med
return bintime, binarr, binarr_err
######################################################################
# MAIN (FOR TESTING)
######################################################################
if __name__ == '__main__':
######################################################################
# TEST binning2D_per_night
######################################################################
arr = np.array([[1,2,3,4,5,6, 67,68,64, -10,-11,-13], \
[1,2,3,4,5,6, 24,28,32, 10,11,13]])
time = np.array([[1,2,3,4,5,6, 10001,10002,10003, 20001,20002,20003], \
[1,2,3,4,5,6.1, 10001,10002.1,10003.3, 20001,20002,20003]])
bintime,binarr, _ = binning2D_per_night(time,arr,6)
plt.figure()
plt.plot(time,arr,'k.')
plt.plot(bintime,binarr,'r.')
######################################################################
# TEST binning1D_per_night
######################################################################
arr = np.array([1,2,3,4,5,6, 67,68,64, -10,-11,-13])
time = np.array([1,2,3,4,5,6, 10001,10002,10003, 20001,20002,20003])
bintime,binarr, _ = binning1D_per_night(time,arr,6)
plt.figure()
plt.plot(time,arr,'k.')
plt.plot(bintime,binarr,'r.') | gpl-3.0 |
glouppe/scikit-learn | examples/decomposition/plot_sparse_coding.py | 12 | 4007 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
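# Editor's note: ricker_function above follows the standard Ricker ("Mexican
# hat") form, proportional to (1 - (x - c)**2 / w**2) * exp(-(x - c)**2 / (2 * w**2));
# the exact constant prefactor is immaterial here because ricker_matrix
# re-normalises every atom to unit L2 norm.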
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
mkery/CS349-roads | tripmatching/matchDriver.py | 1 | 1187 | import numpy as np
import sys
import matplotlib.pyplot as pyplot
import rdp_trip as rdp
import findMatches as findMatches
driver = sys.argv[1]
driverB = 2
"""
for i in range(1,201):
print "generating rdp for "+str(driver)+" trip "+str(i)
rdp.generateRDP(str(driver)+"_"+str(i), str(driver), str(i))
"""
results = []
for i in range(1,2):
aa = open("driver"+str(driver)+"/"+str(driver)+"_"+str(i)+"_angle_dist.csv")
tripA = np.genfromtxt(aa, delimiter=',')
aa.close()
ardp = open("driver"+str(driver)+"/"+str(driver)+"_"+str(i)+"_rdp.csv")
tripA_rdp = np.genfromtxt(ardp, delimiter=',')
ardp.close()
for j in range(1,200):
if j != i:
aa = open("driver"+str(driver)+"/"+str(driver)+"_"+str(j)+"_angle_dist.csv")
tripB = np.genfromtxt(aa, delimiter=',')
aa.close()
ardp = open("driver"+str(driver)+"/"+str(driver)+"_"+str(j)+"_rdp.csv")
tripB_rdp = np.genfromtxt(ardp, delimiter=',')
ardp.close()
res = findMatches.matchTrips(tripA, tripA_rdp, tripB, tripB_rdp)
print "trips "+str(i)+" : "+str(j)+" "+str(res)
results.append(findMatches.matchTrips(tripA, tripA_rdp, tripB, tripB_rdp))
np.savetxt(str(driver)+"_matchres.csv", results, delimiter=",") | mit |
mifads/pyscripts | emepsitecomp.py | 1 | 7586 | #!/usr/bin/env python3
#!-*- coding: utf-8 -*-
"""
emepsitecomp is intended to extract values for one location,
usually from two or more input files.
Output is produced for variables matching a pattern, e.g. SURF_ppb_NO2 or SURF_ppb -
the latter will produce plots for all variables containing SURF_ppb.
If labels are given (-L option) these are used in the legend, otherwise
mkCdfComp will attempt to 'guess' these by looking for the pattern. We
would get rv4_15a from e.g.:
\n
-i /global/work/mifads/TRENDS/rv4_15a.2012/Base/Base_month.nc
\n
or -i rv4_15a.2012/Base/Base
or -i rv4_15a.2012/rv4_15a_month.nc
This pattern matching for labels is based upon Dave's usual filenames,
so is not guaranteed to work for all cases ;-)
A typical pattern (for EECCA grids) to look at NO, NO2 and NO3 over
north-western Europe might be:
mkCdfComp.py -y 2010 -d "40 70 20 50" -i TRENDS/rv4_15a.2010/Base/Base_month.nc TRENDS/rv4_15anosoil.2010/Base/Base_month.nc -p -v SURF_ppb_NO
(Use -p to get plots to screen; png files are produced anyway.)
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as cdf
import os
import sys
#------------------ arguments ----------------------------------------------
#parser=argparse.ArgumentParser(usage=__doc__) also works, but text at start
parser=argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v','--varkeys',nargs='*',
help='varname string in nc file, can be partial eg ug_PM',required=True)
parser.add_argument('-i','--ifiles',help='Input files',nargs='*',required=True)
parser.add_argument('-c','--coords',help='coords wanted, i j',nargs=2,required=False)
parser.add_argument('-o','--ofile',help='output file',required=False)
parser.add_argument('-O','--odir',help='output directory',default='.')
parser.add_argument('-p','--plot',help='plot on screen?\n(Optional)',action='store_true')
parser.add_argument('-L','--labels',help='labels, e.g. -L"rv4.15 rv4.15a rv4.15b"\n(Optional)',required=False)
parser.add_argument('-t','--title',help='title',required=False)
parser.add_argument('-T','--tab',help='title',action='store_true')
parser.add_argument('-V','--verbose',help='extra info',action='store_true')
parser.add_argument('-y','--year',help='year',required=False)
args=parser.parse_args()
dtxt='CdfComp'
dbg=False
if args.verbose: dbg=True
if dbg: print(dtxt+'ARGS', args)
if args.coords:
#i, j = [ int(n) for n in args.coords.split() ]
i, j = [ int(n) for n in args.coords ]
else:
i, j = [ -999, -999 ] # will average over full domain
args.coords='Default'
if dbg: print(dtxt+' coords', i, j )
case=dict()
cases=[]
ifiles=[]
for n, ifile in enumerate(args.ifiles):
#print('TRY ', n, ifile)
if os.path.isfile(ifile):
f = ifile
else:
print('TRY File not found! ' + ifile)
f = ifile + '/Base/Base_month.nc' # Default
if dbg: print('=> ', f)
if not os.path.isfile(f):
sys.exit('File not found! ' + f)
tmpc= f.split('/')
if dbg: print(f, '=>fTERMS ', n, tmpc)
if len(tmpc)>2:
if args.year is not None:
case[f]= tmpc[-3].replace('.%s'%args.year,'') # rv4.2012 from rv4.2012/Base/Base_month.nc
else:
case[f]= tmpc[0] # CAMS_IPOA from CAMS_IPOA/CAMS_IPOA_month.nc
if dbg: print('CASE', case[f])
cases.append(case[f])
ifiles.append(f) # with full path name to .nc
if dbg: print(dtxt+'CASE', n, case[f] )
#sys.exit()
labels = cases.copy() # default
if args.labels:
labels = args.labels.split()
for c, b, f in zip( cases, labels, args.ifiles ):
if dbg: print("LABEL CASE FILE ", b, c, f )
first=True
file0=ifiles[0] # Need file to get keys at start
#print('F0', file0)
#print('FINAL LABELS', labels)
ecdf=cdf.Dataset(file0,'r',format='NETCDF4')
keys = ecdf.variables.keys()
odir='.'
if args.odir:
odir=args.odir
os.makedirs(odir,exist_ok=True)
if args.tab:
tab=open(odir+'/ResCdfCompTab_%s_%s.txt' %
( cases[0], '_'.join(labels[1:])), 'w' )
header='%-30s' % 'Variable'
for c in labels:
header += ( '%18s' % c )
tab.write('%s\n' % header )
months=list(range(1,13))
colours = 'red orange yellow blue green'.split()
for var in args.varkeys:
for key in keys:
if dbg: print(' VAR, KEY ', var, key )
if not var in key:
continue
if key.startswith('D3_'):
print(' Skip 3D VAR, KEY ', var, key )
continue
#print('Processing ', var, key )
nfiles = len(ifiles)
for nf, ifile in enumerate(ifiles):
ecdf=cdf.Dataset(ifile,'r',format='NETCDF4')
monthly = np.full(12,np.nan)
tmpvals = np.full(nfiles+1,np.nan) # TMP used for fake fullrun
tmpx = np.linspace(0.5,nfiles+0.5,nfiles+1)
if key in ecdf.variables.keys():
#Aug tmpv=ecdf.variables[key][:,:,:]
tmpv=ecdf.variables[key]
if dbg: print('KEY VALUES? ', ifile, key, np.max(tmpv), tmpv.ndim )
if i> -1:
if tmpv.ndim>2:
vals=ecdf.variables[key][:,j,i]
else:
vals=[ ecdf.variables[key][j,i], ] # make list
else:
vals=np.mean(ecdf.variables[key][:,:,:],axis=(1,2))
#print('VALS ', nf, vals)
if np.max(vals) < 1.0e-20: # 1.0e-3:
if dbg: print('ZERO VALUES? ', ifile, key )
print('ZERO VALUES? ', ifile, key )
continue
#print('TMPV var ', key, tmpv.shape, i, j, np.max(vals) )
#print('VALS var ', key, vals.shape, i, j, np.max(vals), vals )
for n in range(len(vals)):
print('VAL %30s %3d %.4g ' % ( key, n+1, vals[n] ))
else:
print(' KEY NOT FOUND ', key, case[ifile])
continue
if( len(vals) ==1 ): # Just have one value, e.g. annual
tmpvals[nf] = monthly[0]
#plt.bar(tmpx,tmpvals,label=labels[nf],color='C0')
#SITE plt.bar(tmpx,tmpvals,label=labels[nf],color=colours[nf])
xmin=0.0 # Start in Jan.
xmax=nfiles+2 #
else:
#SITE plt.plot(months,monthly,label=labels[nf])
xmin=1.0 # Start in Jan.
xmax=12.0 # QUERY??
nf += 1
if args.tab:
if nf ==1: tab.write('%-30s' % key)
tab.write('%18.3f' % np.mean(monthly) )
#SITE if dbg: print('M:', monthly)
if nf == 0:
#print('NO VALUES FOUND', ifile, key )
continue
if args.plot:
if args.title is None :
plt.title(key + ' (coords %s)'%args.coords)
else: # KEY is special
title= args.title.replace('KEY',key)
plt.title(title)
plt.ylim(ymin=0.0)
# We add a bit of vertical space for better legend placement
y=plt.yticks()[0]
plt.ylim(ymax=y[-1]+2*(y[-1]-y[-2]))
plt.xlim(xmin=xmin)
plt.xlim(xmax=xmax)
#SITE if( len(monthly) ==1 ): # Just have one value, e.g. annual
#SITE plt.xticks(visible=False)
plt.legend(loc='upper left',bbox_to_anchor=(0.05,1.0))
if args.ofile:
ofile=args.ofile
else:
ofile='PlotSiteComp_%s_%s_%s.png' % ( key, cases[0], '_'.join(labels) )
if args.plot:
plt.savefig('%s/%s' % ( odir, ofile ))
# if args.plot:
# plt.show()
#plt.clf()
plt.close()
if args.tab:
tab.write('\n')
| gpl-3.0 |
michaelneuder/image_quality_analysis | bin/nets/old/ssim_df_n_si_single.py | 1 | 9907 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
import time
import pandas as pd
import matplotlib.pyplot as plt
def convolve_inner_layers(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return tf.nn.tanh(y)
def convolve_ouput_layer(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
output_feed = tf.concat([conv1, conv2, conv3],3)
output = convolve_ouput_layer(output_feed, W['weights_out'], b['bias_out'])
return output
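# Editor's note: conv_net concatenates the outputs of all three hidden layers
# along the channel axis before the final 1x1 convolution, so the output layer
# sees first_layer + second_layer + third_layer (17 + 9 + 4 = 30) feature maps.
# Only the first layer uses an 11x11 'VALID' kernel, so an 11x11x4 input patch
# is reduced to a single output value.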
def get_variance(training_target):
all_pixels = training_target.flatten()
return all_pixels.var()
def get_epoch(x, y, n):
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
batches[i+1] = [np.asarray(temp_x), np.asarray(temp_y)]
return batches
def normalize_input(train_data, test_data):
mean, std_dev = np.mean(train_data, axis=0), np.std(train_data, axis=0)
return (train_data - mean) / std_dev, (test_data - mean) / std_dev
def calculate_ssim(window_orig, window_recon):
k_1 = 0.01
k_2 = 0.03
L = 255
if window_orig.shape != (11,11) or window_recon.shape != (11,11):
raise ValueError('please check window size for SSIM calculation!')
orig_data = window_orig.flatten()
recon_data = window_recon.flatten()
mean_x = np.mean(orig_data)
mean_y = np.mean(recon_data)
var_x = np.var(orig_data)
var_y = np.var(recon_data)
covar = np.cov(orig_data, recon_data)[0][1]
c_1 = (L*k_1)**2
c_2 = (L*k_2)**2
num = (2*mean_x*mean_y+c_1)*(2*covar+c_2)
den = (mean_x**2+mean_y**2+c_1)*(var_x+var_y+c_2)
return num/den
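# Editor's note: calculate_ssim above is the standard single-window SSIM,
#   SSIM(x, y) = (2*mu_x*mu_y + c1) * (2*cov_xy + c2)
#                / ((mu_x**2 + mu_y**2 + c1) * (var_x + var_y + c2)),
# with c1 = (k_1*L)**2, c2 = (k_2*L)**2, k_1 = 0.01, k_2 = 0.03, L = 255.
# The tiny self-check below is an illustrative addition, not part of the
# original script; it relies only on numpy, which is already imported.
def _ssim_self_check():
    window = np.random.randint(0, 256, size=(11, 11)).astype(np.float64)
    # should be close to 1, though not exactly 1: np.cov defaults to ddof=1
    # while np.var uses ddof=0
    return calculate_ssim(window, window)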
def main():
# parameters
filter_dim = 11
filter_dim2 = 1
batch_size = 200
image_dim = 96
input_layer = 4
first_layer = 17
second_layer = 9
third_layer = 4
output_layer = 1
learning_rate = .01
epochs = 400
# seeding for debug purposes --- don't forget to remove
# SEED = 12345
# np.random.seed(SEED)
# tf.set_random_seed(SEED)
print('loading image files ... ')
# train/test images
orig_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_500.txt', header=None, delim_whitespace = True)
recon_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_500.txt', header=None, delim_whitespace = True)
SSIM_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_500.txt', header=None, delim_whitespace = True)
orig_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_140.txt', header=None, delim_whitespace = True)
recon_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_140.txt', header=None, delim_whitespace = True)
SSIM_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_140.txt', header=None, delim_whitespace = True)
# getting 4 input channels for train and test
original_images_train = orig_500.values
original_images_train_sq = orig_500.values**2
reconstructed_images_train = recon_500.values
reconstructed_images_train_sq = recon_500.values**2
original_images_test = orig_140.values
original_images_test_sq = orig_140.values**2
reconstructed_images_test = recon_140.values
reconstructed_images_test_sq = recon_140.values**2
# stack inputs
training_input = np.dstack((original_images_train, reconstructed_images_train, original_images_train_sq, reconstructed_images_train_sq))
testing_input = np.dstack((original_images_test, reconstructed_images_test, original_images_test_sq, reconstructed_images_test_sq))
# normalize inputs
training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
# target values
comparison_images_train = SSIM_500.values
comparison_images_test = SSIM_140.values
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping the result data to --- (num pics), 96, 96, 1
target_data_train = np.reshape(comparison_images_train, [train_size, image_dim, image_dim, output_layer])
target_data_test = np.reshape(comparison_images_test, [test_size, image_dim, image_dim, output_layer])
# reshaping
train_data = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,input_layer])
test_data = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,input_layer])
image_dim = 11
single_train_data, single_test_data = [], []
for i in range(train_data.shape[0]):
for j in range(11):
for k in range(11):
single_train_data.append(train_data[i,j,k])
if i < 140:
single_test_data.append(test_data[i,j,k])
single_train_data = np.reshape(np.asarray(single_train_data), (train_data.shape[0], 11, 11, 4))
single_test_data = np.reshape(np.asarray(single_test_data), (test_data.shape[0], 11, 11, 4))
ssim, ssim1 = [], []
for i in range(single_train_data.shape[0]):
ssim.append(calculate_ssim(single_train_data[i][...,0], single_train_data[i][...,1]))
if i < 140:
ssim1.append(calculate_ssim(single_test_data[i][...,0], single_test_data[i][...,1]))
ssim = np.reshape(np.asarray(ssim), (single_train_data.shape[0],1))
ssim1 = np.reshape(np.asarray(ssim1), (single_test_data.shape[0],1))
# initializing variables --- fan in
scaling_factor = 1.0
initializer = tf.contrib.layers.variance_scaling_initializer(factor=scaling_factor, mode='FAN_IN')
weights = {
'weights1': tf.get_variable('weights1', [filter_dim,filter_dim,input_layer,first_layer], initializer=initializer),
'weights2': tf.get_variable('weights2', [filter_dim2,filter_dim2,first_layer,second_layer], initializer=initializer),
'weights3': tf.get_variable('weights3', [filter_dim2,filter_dim2,second_layer,third_layer], initializer=initializer),
'weights_out': tf.get_variable('weights4', [filter_dim2,filter_dim2,third_layer+second_layer+first_layer,output_layer], initializer=initializer)
}
biases = {
'bias1': tf.get_variable('bias1', [first_layer], initializer=initializer),
'bias2': tf.get_variable('bias2', [second_layer], initializer=initializer),
'bias3': tf.get_variable('bias3', [third_layer], initializer=initializer),
'bias_out': tf.get_variable('bias4', [output_layer], initializer=initializer)
}
# tf Graph input
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, input_layer])
y = tf.placeholder(tf.float32, [None, output_layer])
# model
prediction = conv_net(x, weights, biases)
# get variance to normalize error terms during training
variance = get_variance(target_data_train)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# session
init = tf.global_variables_initializer()
error_train, error_test = [], []
with tf.Session() as sess:
sess.run(init)
epoch_count = 0
global_step = 0
start_time = time.time()
print("starting training ... ")
while epoch_count < epochs:
print('---------------------------------------------------------')
print('beginning epoch {} ...'.format(epoch_count))
epoch = get_epoch(single_train_data, ssim, batch_size)
for i in epoch:
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
percent_error = 100*loss/variance
print(" - training global_step {0:4d} error: {1:8.4f} {2:8.2f}%".format(global_step, loss, percent_error))
global_step += 1
epoch_count+=1
error_train.append(percent_error)
score = sess.run(cost, feed_dict={x: single_test_data, y: ssim1})
percent_error = 100*score/variance
error_test.append(percent_error)
print('---- test score : {:.4f}, {:.4f}% ----'.format(score, percent_error))
plt.plot(np.arange(len(error_train)), error_train, label='train')
plt.plot(np.arange(len(error_test)), error_test, label='test')
plt.legend()
plt.ylim(0,100)
plt.show()
if __name__ == '__main__':
main()
| mit |
elijah513/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
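    Examples
    --------
    A minimal, illustrative sketch (the estimator and data below are
    arbitrary placeholders, not fixtures of this module):
    >>> import numpy as np
    >>> from sklearn.learning_curve import learning_curve
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> X = np.random.rand(50, 3)
    >>> y = np.array([0, 1] * 25)
    >>> train_sizes_abs, train_scores, test_scores = learning_curve(
    ...     DecisionTreeClassifier(), X, y, cv=5,
    ...     train_sizes=[0.25, 0.5, 1.0])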
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
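    Examples
    --------
    A minimal, illustrative sketch (the estimator, parameter grid and data
    are arbitrary placeholders, not fixtures of this module):
    >>> import numpy as np
    >>> from sklearn.learning_curve import validation_curve
    >>> from sklearn.svm import SVC
    >>> X = np.random.rand(40, 2)
    >>> y = np.array([0, 1] * 20)
    >>> train_scores, test_scores = validation_curve(
    ...     SVC(), X, y, param_name="gamma",
    ...     param_range=[0.1, 1.0, 10.0], cv=3)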
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
shusenl/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_voting_decision_regions.py | 1 | 3238 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
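# Illustrative check (not part of the original example): with voting='soft',
# the ensemble's class probabilities are the weighted average of the fitted
# members' predict_proba outputs, using the same weights [2, 1, 2].
member_probas = np.asarray([clf.predict_proba(X[:1]) for clf in eclf.estimators_])
manual_average = np.average(member_probas, axis=0, weights=[2, 1, 2])
# manual_average should match eclf.predict_proba(X[:1]) up to floating-point noise.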
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y,
s=20, edgecolor='k')
axarr[idx[0], idx[1]].set_title(tt)
# plt.show()
pltshow(plt)
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/resample.py | 3 | 44151 | from datetime import timedelta
import numpy as np
import warnings
import copy
from textwrap import dedent
import pandas as pd
from pandas.core.base import AbstractMethodError, GroupByMixin
from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy,
SeriesGroupBy, groupby, PanelGroupBy)
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds
from pandas.core.indexes.period import PeriodIndex, period_range
import pandas.core.common as com
import pandas.core.algorithms as algos
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas._libs import lib, tslib
from pandas._libs.lib import Timestamp
from pandas._libs.period import IncompatibleFrequency
from pandas.util._decorators import Appender
from pandas.core.generic import _shared_docs
_shared_docs_kwargs = dict()
class Resampler(_GroupBy):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : pandas object
groupby : a TimeGrouper object
axis : int, default 0
kind : str or None
        'period', 'timestamp' to override default index treatment
Notes
-----
After resampling, see aggregate, apply, and transform functions.
Returns
-------
a Resampler of the appropriate type
"""
# to the groupby descriptor
_attributes = ['freq', 'axis', 'closed', 'label', 'convention',
'loffset', 'base', 'kind']
# API compat of allowed attributes
_deprecated_valids = _attributes + ['__doc__', '_cache', '_attributes',
'binner', 'grouper', 'groupby',
'sort', 'kind', 'squeeze', 'keys',
'group_keys', 'as_index', 'exclusions',
'_groupby']
# don't raise deprecation warning on attributes starting with these
# patterns - prevents warnings caused by IPython introspection
_deprecated_valid_patterns = ['_ipython', '_repr']
# API compat of disallowed attributes
_deprecated_invalids = ['iloc', 'loc', 'ix', 'iat', 'at']
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
self.groupby = groupby
self.keys = None
self.sort = True
self.axis = axis
self.kind = kind
self.squeeze = False
self.group_keys = True
self.as_index = True
self.exclusions = set()
self.binner = None
self.grouper = None
if self.groupby is not None:
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self.groupby, k))
for k in self._attributes if
getattr(self.groupby, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
@property
def obj(self):
return self.groupby.obj
@property
def ax(self):
return self.groupby.ax
@property
def _typ(self):
""" masquerade for compat as a Series or a DataFrame """
if isinstance(self._selected_obj, pd.Series):
return 'series'
return 'dataframe'
@property
def _from_selection(self):
""" is the resampling from a DataFrame column or MultiIndex level """
# upsampling and PeriodIndex resampling do not work
# with selection, this state used to catch and raise an error
return (self.groupby is not None and
(self.groupby.key is not None or
self.groupby.level is not None))
def _deprecated(self, op):
warnings.warn(("\n.resample() is now a deferred operation\n"
"You called {op}(...) on this deferred object "
"which materialized it into a {klass}\nby implicitly "
"taking the mean. Use .resample(...).mean() "
"instead").format(op=op, klass=self._typ),
FutureWarning, stacklevel=3)
return self.mean()
def _make_deprecated_binop(op):
# op is a string
def _evaluate_numeric_binop(self, other):
result = self._deprecated(op)
return getattr(result, op)(other)
return _evaluate_numeric_binop
def _make_deprecated_unary(op, name):
# op is a callable
def _evaluate_numeric_unary(self):
result = self._deprecated(name)
return op(result)
return _evaluate_numeric_unary
def __array__(self):
return self._deprecated('__array__').__array__()
__gt__ = _make_deprecated_binop('__gt__')
__ge__ = _make_deprecated_binop('__ge__')
__lt__ = _make_deprecated_binop('__lt__')
__le__ = _make_deprecated_binop('__le__')
__eq__ = _make_deprecated_binop('__eq__')
__ne__ = _make_deprecated_binop('__ne__')
__add__ = __radd__ = _make_deprecated_binop('__add__')
__sub__ = __rsub__ = _make_deprecated_binop('__sub__')
__mul__ = __rmul__ = _make_deprecated_binop('__mul__')
__floordiv__ = __rfloordiv__ = _make_deprecated_binop('__floordiv__')
__truediv__ = __rtruediv__ = _make_deprecated_binop('__truediv__')
if not compat.PY3:
__div__ = __rdiv__ = _make_deprecated_binop('__div__')
__neg__ = _make_deprecated_unary(lambda x: -x, '__neg__')
__pos__ = _make_deprecated_unary(lambda x: x, '__pos__')
__abs__ = _make_deprecated_unary(lambda x: np.abs(x), '__abs__')
__inv__ = _make_deprecated_unary(lambda x: -x, '__inv__')
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self.groupby, attr)
if attr in self.obj:
return self[attr]
if attr in self._deprecated_invalids:
raise ValueError(".resample() is now a deferred operation\n"
"\tuse .resample(...).mean() instead of "
".resample(...)")
matches_pattern = any(attr.startswith(x) for x
in self._deprecated_valid_patterns)
if not matches_pattern and attr not in self._deprecated_valids:
self = self._deprecated(attr)
return object.__getattribute__(self, attr)
def __setattr__(self, attr, value):
if attr not in self._deprecated_valids:
raise ValueError("cannot set values on {0}".format(
self.__class__.__name__))
object.__setattr__(self, attr, value)
def __getitem__(self, key):
try:
return super(Resampler, self).__getitem__(key)
except (KeyError, com.AbstractMethodError):
# compat for deprecated
if isinstance(self.obj, com.ABCSeries):
return self._deprecated('__getitem__')[key]
raise
def __setitem__(self, attr, value):
raise ValueError("cannot set items on {0}".format(
self.__class__.__name__))
def _convert_obj(self, obj):
"""
provide any conversions for the object in order to correctly handle
Parameters
----------
obj : the object to be resampled
Returns
-------
obj : converted object
"""
obj = obj._consolidate()
return obj
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _set_binner(self):
"""
setup our binners
cache these as we are an immutable object
"""
if self.binner is None:
self.binner, self.grouper = self._get_binner()
def _get_binner(self):
"""
create the BinGrouper, assume that self.set_grouper(obj)
has already been called
"""
binner, bins, binlabels = self._get_binner_for_time()
bin_grouper = BinGrouper(bins, binlabels)
return binner, bin_grouper
def _assure_grouper(self):
""" make sure that we are creating our binner & grouper """
self._set_binner()
def plot(self, *args, **kwargs):
# for compat with prior versions, we want to
# have the warnings shown here and just have this work
return self._deprecated('plot').plot(*args, **kwargs)
_agg_doc = dedent("""
Examples
--------
>>> s = Series([1,2,3,4,5],
index=pd.date_range('20130101',
periods=5,freq='s'))
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,
label=left, convention=start, base=0]
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum','mean','max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result' : lambda x: x.mean() / x.std(),
'total' : np.sum})
total result
2013-01-01 00:00:00 3 2.121320
2013-01-01 00:00:02 7 4.949747
2013-01-01 00:00:04 5 NaN
See also
--------
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded=''))
def aggregate(self, arg, *args, **kwargs):
self._set_binner()
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
result = self._groupby_and_aggregate(arg,
*args,
**kwargs)
result = self._apply_loffset(result)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
return self._selected_obj.groupby(self.groupby).transform(
arg, *args, **kwargs)
def _downsample(self, f):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
self._set_binner()
grouper = self.grouper
if subset is None:
subset = self.obj
grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis)
# try the key selection
try:
return grouped[key]
except KeyError:
return grouped
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
""" re-evaluate the obj with a groupby aggregation """
if grouper is None:
self._set_binner()
grouper = self.grouper
obj = self._selected_obj
try:
grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
except TypeError:
# panel grouper
grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
try:
result = grouped.aggregate(how, *args, **kwargs)
except Exception:
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _apply_loffset(self, result):
"""
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(self.loffset, (DateOffset, timedelta)) and
isinstance(result.index, DatetimeIndex) and
len(result.index) > 0
)
if needs_offset:
result.index = result.index + self.loffset
self.loffset = None
return result
def _get_resampler_for_grouping(self, groupby, **kwargs):
""" return the correct class for resampling with groupby """
return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
def _wrap_result(self, result):
""" potentially wrap any results """
if isinstance(result, com.ABCSeries) and self._selection is not None:
result.name = self._selection
return result
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('pad', limit=limit)
ffill = pad
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('backfill', limit=limit)
bfill = backfill
def fillna(self, method, limit=None):
"""
Fill missing values
Parameters
----------
method : str, method of resampling ('ffill', 'bfill')
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample(method, limit=limit)
@Appender(_shared_docs['interpolate'] % _shared_docs_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast, **kwargs)
def asfreq(self, fill_value=None):
"""
return the values at the new freq,
essentially a reindex
Parameters
----------
fill_value: scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
See Also
--------
Series.asfreq
DataFrame.asfreq
"""
return self._upsample('asfreq', fill_value=fill_value)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('var', args, kwargs)
return self._downsample('var', ddof=ddof)
Resampler._deprecated_valids += dir(Resampler)
# downsample methods
for method in ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
'median', 'prod', 'ohlc']:
def f(self, _method=method, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# groupby & aggregate methods
for method in ['count', 'size']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# series only methods
for method in ['nunique']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(SeriesGroupBy, method).__doc__
setattr(Resampler, method, f)
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None):
""" potentially we might have a deprecation warning, show it
but call the appropriate methods anyhow """
if how is not None:
# .resample(..., how='sum')
if isinstance(how, compat.string_types):
method = "{0}()".format(how)
# .resample(..., how=lambda x: ....)
else:
method = ".apply(<func>)"
# if we have both a how and fill_method, then show
# the following warning
if fill_method is None:
warnings.warn("how in .resample() is deprecated\n"
"the new syntax is "
".resample(...).{method}".format(
method=method),
FutureWarning, stacklevel=3)
r = r.aggregate(how)
if fill_method is not None:
# show the prior function call
method = '.' + method if how is not None else ''
args = "limit={0}".format(limit) if limit is not None else ""
warnings.warn("fill_method is deprecated to .resample()\n"
"the new syntax is .resample(...){method}"
".{fill_method}({args})".format(
method=method,
fill_method=fill_method,
args=args),
FutureWarning, stacklevel=3)
if how is not None:
r = getattr(r, fill_method)(limit=limit)
else:
r = r.aggregate(fill_method, limit=limit)
return r
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None)
groupby = kwargs.pop('groupby', None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = copy.copy(parent.groupby)
def _apply(self, f, **kwargs):
"""
dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, compat.string_types):
return getattr(x, f)(**kwargs)
return x.apply(f, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result)
_upsample = _apply
_downsample = _apply
_groupby_and_aggregate = _apply
class DatetimeIndexResampler(Resampler):
@property
def _resampler_for_grouping(self):
return DatetimeIndexResamplerGroupby
def _get_binner_for_time(self):
# this is how we are actually creating the bins
if self.kind == 'period':
return self.groupby._get_time_period_bins(self.ax)
return self.groupby._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
self._set_binner()
how = self._is_cython_func(how) or how
ax = self.ax
obj = self._selected_obj
if not len(ax):
# reset to the new freq
obj = obj.copy()
obj.index.freq = self.freq
return obj
# do we have a regular frequency
if ax.freq is not None or ax.inferred_freq is not None:
if len(self.grouper.binlabels) > len(ax) and how is None:
# let's do an asfreq
return self.asfreq()
# we are downsampling
# we want to call the actual grouper method here
result = obj.groupby(
self.grouper, axis=self.axis).aggregate(how, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad',
'ffill', 'asfreq'} method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
self._set_binner()
if self.axis:
raise AssertionError('axis must be 0')
if self._from_selection:
raise ValueError("Upsampling from level= or on= selection"
" is not supported, use .set_index(...)"
" to explicitly set index to"
" datetime-like")
ax = self.ax
obj = self._selected_obj
binner = self.binner
res_index = self._adjust_binner_for_upsample(binner)
# if we have the same frequency as our axis, then we are equal sampling
if limit is None and to_offset(ax.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=method,
limit=limit, fill_value=fill_value)
return self._wrap_result(result)
def _wrap_result(self, result):
result = super(DatetimeIndexResampler, self)._wrap_result(result)
# we may have a different kind that we were asked originally
# convert if needed
if self.kind == 'period' and not isinstance(result.index, PeriodIndex):
result.index = result.index.to_period(self.freq)
return result
class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return DatetimeIndexResampler
class PeriodIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return PeriodIndexResamplerGroupby
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == 'period': # pragma: no cover
print('Warning: multiple of frequency -> timestamps')
# Cannot have multiple of periods, convert to timestamp
self.kind = 'timestamp'
# convert to timestamp
if not (self.kind is None or self.kind == 'period'):
if self._from_selection:
# see GH 14008, GH 12871
msg = ("Resampling from level= or on= selection"
" with a PeriodIndex is not currently supported,"
" use .set_index(...) to explicitly set index")
raise NotImplementedError(msg)
else:
obj = obj.to_timestamp(how=self.convention)
return obj
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
result = self._downsample(arg, *args, **kwargs)
result = self._apply_loffset(result)
return result
agg = aggregate
def _get_new_index(self):
""" return our new index """
ax = self.ax
if len(ax) == 0:
values = []
else:
start = ax[0].asfreq(self.freq, how=self.convention)
end = ax[-1].asfreq(self.freq, how='end')
values = period_range(start, end, freq=self.freq).asi8
return ax._shallow_copy(values, freq=self.freq)
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
new_index = self._get_new_index()
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
if is_subperiod(ax.freq, self.freq):
# Downsampling
if len(new_index) == 0:
bins = []
else:
i8 = memb.asi8
rng = np.arange(i8[0], i8[-1] + 1)
bins = memb.searchsorted(rng, side='right')
grouper = BinGrouper(bins, new_index)
return self._groupby_and_aggregate(how, grouper=grouper)
elif is_superperiod(ax.freq, self.freq):
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
'Frequency {} cannot be resampled to {}, as they are not '
'sub or super periods'.format(ax.freq, self.freq))
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
if self._from_selection:
raise ValueError("Upsampling from level= or on= selection"
" is not supported, use .set_index(...)"
" to explicitly set index to"
" datetime-like")
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._upsample(
method, limit=limit, fill_value=fill_value)
ax = self.ax
obj = self.obj
new_index = self._get_new_index()
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(
obj, indexer, new_index, axis=self.axis))
class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return PeriodIndexResampler
class TimedeltaIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return TimedeltaIndexResamplerGroupby
def _get_binner_for_time(self):
return self.groupby._get_time_delta_bins(self.ax)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# We are actually downsampling
# but are in the asfreq path
# GH 12926
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return TimedeltaIndexResampler
def resample(obj, kind=None, **kwds):
""" create a TimeGrouper and return our resampler """
tg = TimeGrouper(**kwds)
return tg._get_resampler(obj, kind=kind)
resample.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,
limit=None, kind=None, **kwargs):
""" return our appropriate resampler when grouping as well """
# .resample uses 'on' similar to how .groupby uses 'key'
kwargs['key'] = kwargs.pop('on', None)
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
nperiods : optional, integer
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
Notes
-----
Use begin, end, nperiods to generate intervals that cannot be derived
directly from the associated object
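    Examples
    --------
    A minimal sketch (``TimeGrouper`` is normally obtained through
    ``pd.Grouper(freq=...)`` or ``.resample(...)`` rather than constructed
    directly):
    >>> import pandas as pd
    >>> s = pd.Series(range(4),
    ...               index=pd.date_range('2000-01-01', periods=4, freq='T'))
    >>> result = s.groupby(pd.Grouper(freq='2T')).sum()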
"""
def __init__(self, freq='Min', closed=None, label=None, how='mean',
nperiods=None, axis=0,
fill_method=None, limit=None, loffset=None, kind=None,
convention=None, base=0, **kwargs):
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.nperiods = nperiods
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
if isinstance(loffset, compat.string_types):
loffset = to_offset(loffset)
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj, kind=None):
"""
return my resampler or raise if we have an invalid axis
Parameters
----------
obj : input object
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
a Resampler
Raises
------
TypeError if incompatible axis
"""
self._set_grouper(obj)
ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, PeriodIndex) or kind == 'period':
return PeriodIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(obj,
groupby=self,
axis=self.axis)
raise TypeError("Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
"but got an instance of %r" % type(ax).__name__)
def _get_grouper(self, obj):
# create the resampler and return our binner
r = self._get_resampler(obj)
r._set_binner()
return r.binner, r.grouper, r.obj
def _get_binner_for_grouping(self, obj):
# return an ordering of the transformed group labels,
# suitable for multi-grouping, e.g the labels for
# the resampled intervals
binner, grouper, obj = self._get_grouper(obj)
l = []
for key, group in grouper.get_iterator(self.ax):
l.extend([key] * len(group))
if isinstance(self.ax, PeriodIndex):
grouper = binner.__class__(l, freq=binner.freq, name=binner.name)
else:
# resampling causes duplicated values, specifying freq is invalid
grouper = binner.__class__(l, name=binner.name)
# since we may have had to sort
# may need to reorder groups here
if self.indexer is not None:
indexer = self.indexer.argsort(kind='quicksort')
grouper = grouper.take(indexer)
return grouper
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(first, last, self.freq,
closed=self.closed,
base=self.base)
tz = ax.tz
# GH #12037
# use first/last directly instead of call replace() on them
# because replace() will swallow the nanosecond part
        # thus the last bin may be slightly before the end if the end contains
        # a nanosecond part, leading to a `Values falls after last bin` error
binner = labels = DatetimeIndex(freq=self.freq,
start=first,
end=last,
tz=tz,
name=ax.name)
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == last and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, tslib.NaT)
labels = labels.insert(0, tslib.NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
day_nanos = _delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
return binner, bin_edges
def _get_time_delta_bins(self, ax):
if not isinstance(ax, TimedeltaIndex):
raise TypeError('axis must be a TimedeltaIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = TimedeltaIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax[0]
end = ax[-1]
labels = binner = TimedeltaIndex(start=start,
end=end,
freq=self.freq,
name=ax.name)
end_stamps = labels + 1
bins = ax.searchsorted(end_stamps, side='left')
# Addresses GH #10530
if self.base > 0:
labels += type(self.freq)(self.base)
return binner, bins, labels
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=self.freq,
name=ax.name)
end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = algos.take_1d(obj.values, indexer)
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
raise ValueError("'obj' should be either a Series or a DataFrame")
def _get_range_edges(first, last, offset, closed='left', base=0):
if isinstance(offset, compat.string_types):
offset = to_offset(offset)
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = _delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset,
closed=closed, base=base)
if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = first.normalize()
last = last.normalize()
if closed == 'left':
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# First and last offsets should be calculated from the start day to fix an
    # error caused by resampling across multiple days when a one-day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# 14682 - Since we need to drop the TZ information to perform
# the adjustment in the presence of a DST change,
# save TZ Info and the DST state of the first and last parameters
# so that we can accurately rebuild them at the end.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
first_dst = bool(first.dst())
last_dst = bool(last.dst())
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == 'right':
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst))
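# A small worked example for _adjust_dates_anchored (values assumed for
# illustration): with offset='5min', base=0 and closed='left', first=09:02
# and last=09:58 on the same day give foffset=2min and loffset=3min, so the
# anchored range becomes 09:00 .. 10:00 -- both edges snap to multiples of
# the offset measured from the start of the day.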
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Utility frequency conversion method for Series/DataFrame
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
raise NotImplementedError("'method' argument is not supported")
if how is None:
how = 'E'
new_obj = obj.copy()
new_obj.index = obj.index.asfreq(freq, how=how)
elif len(obj.index) == 0:
new_obj = obj.copy()
new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))
else:
dti = date_range(obj.index[0], obj.index[-1], freq=freq)
dti.name = obj.index.name
new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
if normalize:
new_obj.index = new_obj.index.normalize()
return new_obj
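# Hedged usage sketch (the variable name below is illustrative): given a
# daily Series `s`, something like
#   asfreq(s, 'H', method='ffill')
# reindexes it to an hourly DatetimeIndex and forward-fills the gaps, while a
# PeriodIndex input is converted via PeriodIndex.asfreq instead of reindexing.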
| mit |
kedz/cuttsum | trec2015/sbin/cross-validation/graph-results.py | 1 | 4596 | import pandas as pd
import os
import matplotlib.pylab as plt
plt.style.use('ggplot')
import sys
dirname = sys.argv[1]
def fmeasure(p, r):
return 2 * (p * r) / (p + r)
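# Quick sanity check of the harmonic mean above: fmeasure(0.5, 0.5) == 0.5,
# and fmeasure(1.0, 0.5) == 2 * 0.5 / 1.5 ~= 0.667.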
with open(os.path.join(dirname, "scores.tsv"), "r") as f:
df = pd.read_csv(f, sep="\t")
mean_df = pd.concat([group.mean().to_frame().transpose()
for niter, group in df.groupby("iter")])
mean_df["F1"] = fmeasure(mean_df["E[gain]"].values, mean_df["Comp."])
print mean_df
x = mean_df["iter"].values
plt.close("all")
plt.plot(x, mean_df["Comp."].values, "b", label="$\mathrm{Comp.}$")
plt.plot(x, mean_df["E[gain]"].values, "g", label="$\mathbb{E}[\mathrm{gain}]$")
plt.plot(x, mean_df["F1"].values, "r", label="$F_1$")
plt.xlabel("iters")
plt.ylabel("score")
plt.xticks(range(1, 21))
plt.gca().set_xlim([0.5, 20.5])
plt.legend()
plt.gcf().suptitle("Mean Scores")
plt.savefig(os.path.join(dirname, "mean.scores.png"))
plt.close("all")
plt.plot(x, mean_df["Loss"].values, "y", label="$\mathrm{Loss}$")
plt.xlabel("iters")
plt.ylabel("loss")
plt.xticks(range(1, 21))
plt.gca().set_xlim([0.5, 20.5])
plt.gcf().suptitle("Mean Loss")
plt.savefig(os.path.join(dirname, "mean.loss.png"))
for qid, event_scores in df.groupby("event"):
x = event_scores["iter"].values
plt.close("all")
f = fmeasure(
event_scores["E[gain]"].values,
event_scores["Comp."].values)
plt.plot(x, event_scores["Comp."], "b", label="$\mathrm{Comp.}$")
plt.plot(mean_df["iter"].values, mean_df["Comp."].values, "b--", alpha=.2)
plt.plot(x, event_scores["E[gain]"], "g", label="$\mathbb{E}[\mathrm{gain}]$")
plt.plot(mean_df["iter"].values, mean_df["E[gain]"].values, "g--", alpha=.2)
plt.plot(x, f, "r", label="$F_1$")
plt.plot(x, mean_df["F1"].values, "r--", alpha=.2)
plt.xlabel("iters")
plt.ylabel("score")
plt.xticks(range(1, 21))
plt.gca().set_xlim([0.5, 20.5])
plt.legend()
plt.gcf().suptitle("{} Scores".format(qid))
plt.savefig(os.path.join(dirname, "{}.scores.png".format(qid)))
plt.close("all")
plt.plot(x, event_scores["Loss"], "y", label="$\mathrm{Loss}$")
plt.plot(mean_df["iter"].values, mean_df["Loss"].values, "y--", alpha=.2)
plt.xlabel("iters")
plt.ylabel("loss")
plt.xticks(range(1, 21))
plt.gca().set_xlim([0.5, 20.5])
#plt.legend()
plt.gcf().suptitle("{} Loss".format(qid))
print qid
print os.path.join(dirname, "{}.loss.png".format(qid))
plt.savefig(os.path.join(dirname, "{}.loss.png".format(qid)))
with open(os.path.join(dirname,"weights.tsv"), "r") as f:
df = pd.read_csv(f, sep="\t")
events = set(df["event"].tolist())
for clazz in ["SELECT", "NEXT"]:
for event in events:
for niter in xrange(1, 21):
b = (df["event"] == event) & (df["class"] == clazz) & (df["iter"] == niter)
df.loc[b, "rank"] = df.loc[b]["weight"].argsort()
for name, fweights in df.groupby("name"):
#print df.loc[df["name"] == name]
plt.close("all")
print name
for clazz, avg_weights in fweights.groupby(["iter", "class"]).mean().reset_index().groupby("class"):
if clazz == "SELECT":
plt.plot(avg_weights["iter"].values, avg_weights["weight"].values, "g", label="$select$")
for x, y, rank in zip(avg_weights["iter"].values, avg_weights["weight"].values, avg_weights["rank"].values):
plt.gca().text(x, y + .0001, "$r_\mu={:0.1f}$".format(rank), fontsize=6)
else:
plt.plot(avg_weights["iter"].values, avg_weights["weight"].values, "r", label="$next$")
for x, y, rank in zip(avg_weights["iter"].values, avg_weights["weight"].values, avg_weights["rank"].values):
plt.gca().text(x, y+.0001, "$r_\mu={:0.1f}$".format(rank), fontsize=6)
for qid, fweights_by_qid in fweights.groupby("event"):
for clazz, fq_by_c_qid in fweights_by_qid.groupby("class"):
#print qid, clazz, fq_by_c_qid
if clazz == "SELECT":
plt.plot(fq_by_c_qid["iter"].values, fq_by_c_qid["weight"].values, "g", alpha=.1)
else:
plt.plot(fq_by_c_qid["iter"].values, fq_by_c_qid["weight"].values, "r", alpha=.1)
plt.xticks(range(1,21))
plt.xlabel("iter")
plt.ylabel("weight")
plt.legend()
plt.gca().set_xlim([0.5, 20.5])
plt.gcf().suptitle("feature {}".format(name.replace(" ", "_")))
plt.savefig(os.path.join(dirname, "feat.{}.png".format(name.replace(" ", "_"))))
| apache-2.0 |
jlegendary/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
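# Note: with the values asserted above, (0.41022 - 0.15042) divided by
# (max(H(labels_a), H(labels_b)) - 0.15042), with the max entropy roughly
# 1.095 nats (an estimate computed outside this file), is about 0.275, which
# is consistent with the normalisation AMI = (MI - EMI) / (max(H_a, H_b) - EMI).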
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
pmatigakis/jsbsim | tests/CheckMomentsUpdate.py | 3 | 3602 | # CheckMomentsUpdate.py
#
# Regression test to check the moments are computed according to the last
# update of the CG location (issue reported by Marta Marimon)
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import pandas as pd
from JSBSim_utils import JSBSimTestCase, CreateFDM, ExecuteUntil, RunTest
mol2lbs = 0.00013841 * 32.174049
class CheckMomentsUpdate(JSBSimTestCase):
def CheckCGPosition(self):
weight = self.fdm['inertia/weight-lbs']
empty_weight = self.fdm['inertia/empty-weight-lbs']
contents = self.fdm['buoyant_forces/gas-cell/contents-mol']
radiosonde_weight = weight - empty_weight - contents * mol2lbs
CGx = self.fdm['inertia/cg-x-in']
CGy = self.fdm['inertia/cg-y-in']
CGz = self.fdm['inertia/cg-z-in']
X = self.fdm['inertia/pointmass-location-X-inches']
Y = self.fdm['inertia/pointmass-location-Y-inches']
Z = self.fdm['inertia/pointmass-location-Z-inches']
self.assertAlmostEqual(CGx, X * radiosonde_weight / weight, delta=1E-7)
self.assertAlmostEqual(CGy, Y * radiosonde_weight / weight, delta=1E-7)
self.assertAlmostEqual(CGz, Z * radiosonde_weight / weight, delta=1E-7)
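    # The check above assumes the empty structure and the lifting gas are
    # modelled at the structural reference origin, so only the radiosonde
    # point mass shifts the CG: CG = X * m_radiosonde / m_total along each
    # axis. This is an interpretation of the test setup, not a statement
    # taken from the JSBSim documentation.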
def test_moments_update(self):
script_path = self.sandbox.path_to_jsbsim_file('scripts',
'weather-balloon.xml')
self.fdm = CreateFDM(self.sandbox)
self.fdm.load_script(script_path)
self.fdm.set_output_directive(self.sandbox.path_to_jsbsim_file('tests', 'output.xml'))
self.fdm.run_ic()
self.CheckCGPosition()
dt = self.fdm['simulation/dt']
ExecuteUntil(self.fdm, 1.0-2.0*dt)
self.CheckCGPosition()
# Moves the radio sonde to modify the CG location
self.fdm['inertia/pointmass-location-X-inches'] = 5.0
# Check that the moment is immediately updated accordingly
self.fdm.run()
self.CheckCGPosition()
Fbx = self.fdm['forces/fbx-buoyancy-lbs']
Fbz = self.fdm['forces/fbz-buoyancy-lbs']
CGx = self.fdm['inertia/cg-x-in'] / 12.0 # Converts from in to ft
CGz = self.fdm['inertia/cg-z-in'] / 12.0
Mby = self.fdm['moments/m-buoyancy-lbsft']
self.assertAlmostEqual(Fbx * CGz - Fbz * CGx, Mby, delta=1E-7,
msg="Fbx*CGz-Fbz*CGx = %f and Mby = %f do not match" % (Fbx*CGz-Fbz*CGx, Mby))
# One further step to log the same results in the output file
self.fdm.run()
self.CheckCGPosition()
csv = pd.read_csv('output.csv')
Mby = csv['M_{Buoyant} (ft-lbs)'].iget(-1)
Fbx = csv['F_{Buoyant x} (lbs)'].iget(-1)
Fbz = csv['F_{Buoyant z} (lbs)'].iget(-1)
self.assertAlmostEqual(Fbx * CGz - Fbz * CGx, Mby, delta=1E-7,
msg="Fbx*CGz-Fbz*CGx = %f and Mby = %f do not match" % (Fbx*CGz-Fbz*CGx, Mby))
RunTest(CheckMomentsUpdate)
| lgpl-2.1 |
q1ang/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large dataset
in chunks. The way we proceed is to load one image at a time and
randomly extract 50 patches from it. Once we have accumulated 500 of
these patches (using 10 images), we run the `partial_fit` method of the
online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans lets us see that some
clusters are reassigned during the successive calls to partial_fit.
This happens when the number of patches a cluster represents becomes
too low, and it is better to choose a random new cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
zedoul/HistoricalCalculus | histcalc/data/data.py | 1 | 1075 | """ Test code for existing dataset
Author : Kim Seonghyun <[email protected]>
"""
import time
import os
import math
import numpy as np
import pandas as pd
def pick_headers(dataframe, headers, log=False):
df = dataframe[headers]
tempvals = []
for row in df.iterrows():
tempvals.append([int(row[1][0]),row[1][1]])
dates = [int(q[0]) for q in tempvals]
values = [float(str(q[1]).replace(',','')) for q in tempvals]
if True == log:
for i,d in enumerate(values):
if d <= 0.0:
values[i] = None
else:
values[i] = math.log(d)
return dates, values
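# Hedged usage sketch for pick_headers (the column names are hypothetical):
#   dates, values = pick_headers(df, ["Year", "GDP"], log=True)
# returns the first column as ints and the second as floats (commas stripped),
# with non-positive entries mapped to None before the log transform.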
def load_dataframe(dataset):
datafile = os.path.dirname(os.path.abspath(__file__))+"/"+dataset["file"]
assert(os.path.isfile(datafile))
datasize = dataset["size"]
df = pd.read_csv(datafile, nrows=datasize)
df.iloc[0]
return df
if __name__ == '__main__':
import sys
if sys.version_info < (2, 7) or sys.version_info >= (3, 0):
print ("Requires Python 2.7.x")
exit()
del sys
| mit |
mbayon/TFG-MachineLearning | Gradient-Descent-Roosebrock/stochastic-gradient-descent-rosebrock.py | 1 | 3357 | # USAGE
# python sgd.py
# import the necessary packages
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
import argparse
def sigmoid_activation(x):
# compute and return the sigmoid activation value for a
# given input value
return 1.0 / (1 + np.exp(-x))
def next_batch(X, y, batchSize):
# loop over our dataset `X` in mini-batches of size `batchSize`
for i in np.arange(0, X.shape[0], batchSize):
# yield a tuple of the current batched data and labels
yield (X[i:i + batchSize], y[i:i + batchSize])
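# For example (sizes assumed): with 400 samples and batchSize=32, next_batch
# yields 13 mini-batches -- twelve of 32 rows and a final partial batch of 16.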
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--epochs", type=float, default=100,
help="# of epochs")
ap.add_argument("-a", "--alpha", type=float, default=0.01,
help="learning rate")
ap.add_argument("-b", "--batch-size", type=int, default=32,
help="size of SGD mini-batches")
args = vars(ap.parse_args())
# generate a 2-class classification problem with 400 data points,
# where each data point is a 2D feature vector
(X, y) = make_blobs(n_samples=400, n_features=2, centers=2,
cluster_std=2.5, random_state=95)
# insert a column of 1's as the first entry in the feature
# vector -- this is a little trick that allows us to treat
# the bias as a trainable parameter *within* the weight matrix
# rather than an entirely separate variable
X = np.c_[np.ones((X.shape[0])), X]
# initialize our weight matrix such that it has the same number of
# columns as our input features
print("[INFO] starting training...")
W = np.random.uniform(size=(X.shape[1],))
# initialize a list to store the loss value for each epoch
lossHistory = []
# loop over the desired number of epochs
for epoch in np.arange(0, args["epochs"]):
# initialize the total loss for the epoch
epochLoss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(X, y, args["batch_size"]):
# take the dot product between our current batch of
# features and weight matrix `W`, then pass this value
# through the sigmoid activation function
preds = sigmoid_activation(batchX.dot(W))
# now that we have our predictions, we need to determine
# our `error`, which is the difference between our predictions
# and the true values
error = preds - batchY
# given our `error`, we can compute the total loss value on
# the batch as the sum of squared loss
loss = np.sum(error ** 2)
epochLoss.append(loss)
# the gradient update is therefore the dot product between
# the transpose of our current batch and the error on the
        # batch
gradient = batchX.T.dot(error) / batchX.shape[0]
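        # Side note (an observation, not from the original tutorial): this
        # X^T(preds - y) update is the gradient of the logistic/cross-entropy
        # loss; the squared loss tracked in `loss` above would also need the
        # sigmoid derivative term, so the two quantities are not strictly
        # consistent here.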
# use the gradient computed on the current batch to take
# a "step" in the correct direction
W += -args["alpha"] * gradient
# update our loss history list by taking the average loss
# across all batches
lossHistory.append(np.average(epochLoss))
# compute the line of best fit by setting the sigmoid function
# to 0 and solving for X2 in terms of X1
Y = (-W[0] - (W[1] * X)) / W[2]
# plot the original data along with our line of best fit
plt.figure()
plt.scatter(X[:, 1], X[:, 2], marker="o", c=y)
plt.plot(X, Y, "r-")
# construct a figure that plots the loss over time
fig = plt.figure()
plt.plot(np.arange(0, args["epochs"]), lossHistory)
fig.suptitle("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show() | mit |
kisel/trex-core | scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet.py | 2 | 57080 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
IPv4 (Internet Protocol v4).
"""
import os,time,struct,re,socket,types
from select import select
from collections import defaultdict
from scapy.utils import checksum
from scapy.layers.l2 import *
from scapy.config import conf
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.plist import PacketList,SndRcvList
from scapy.automaton import Automaton,ATMT
import scapy.as_resolvers
####################
## IP Tools class ##
####################
class IPTools:
"""Add more powers to a class that have a "src" attribute."""
def whois(self):
os.system("whois %s" % self.src)
def ottl(self):
t = [32,64,128,255]+[self.ttl]
t.sort()
return t[t.index(self.ttl)+1]
def hops(self):
return self.ottl()-self.ttl-1
_ip_options_names = { 0: "end_of_list",
1: "nop",
2: "security",
3: "loose_source_route",
4: "timestamp",
5: "extended_security",
6: "commercial_security",
7: "record_route",
8: "stream_id",
9: "strict_source_route",
10: "experimental_measurement",
11: "mtu_probe",
12: "mtu_reply",
13: "flow_control",
14: "access_control",
15: "encode",
16: "imi_traffic_descriptor",
17: "extended_IP",
18: "traceroute",
19: "address_extension",
20: "router_alert",
21: "selective_directed_broadcast_mode",
23: "dynamic_packet_state",
24: "upstream_multicast_packet",
25: "quick_start",
30: "rfc4727_experiment",
}
class _IPOption_HDR(Packet):
fields_desc = [ BitField("copy_flag",0, 1),
BitEnumField("optclass",0,2,{0:"control",2:"debug"}),
BitEnumField("option",0,5, _ip_options_names) ]
class IPOption(Packet):
name = "IP Option"
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B", # Only option 0 and 1 have no length and value
length_of="value", adjust=lambda pkt,l:l+2),
StrLenField("value", "",length_from=lambda pkt:pkt.length-2) ]
def extract_padding(self, p):
return b"",p
registered_ip_options = {}
@classmethod
def register_variant(cls):
cls.registered_ip_options[cls.option.default] = cls
@classmethod
def dispatch_hook(cls, pkt=None, *args, **kargs):
if pkt:
opt = pkt[0]&0x1f
if opt in cls.registered_ip_options:
return cls.registered_ip_options[opt]
return cls
class IPOption_EOL(IPOption):
name = "IP Option End of Options List"
option = 0
fields_desc = [ _IPOption_HDR ]
class IPOption_NOP(IPOption):
name = "IP Option No Operation"
option=1
fields_desc = [ _IPOption_HDR ]
class IPOption_Security(IPOption):
name = "IP Option Security"
copy_flag = 1
option = 2
fields_desc = [ _IPOption_HDR,
ByteField("length", 11),
ShortField("security",0),
ShortField("compartment",0),
ShortField("handling_restrictions",0),
StrFixedLenField("transmission_control_code","xxx",3),
]
class IPOption_LSRR(IPOption):
name = "IP Option Loose Source and Record Route"
copy_flag = 1
option = 3
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="routers", adjust=lambda pkt,l:l+3),
ByteField("pointer",4), # 4 is first IP
FieldListField("routers",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-3)
]
def get_current_router(self):
return self.routers[self.pointer//4-1]
class IPOption_RR(IPOption_LSRR):
name = "IP Option Record Route"
option = 7
class IPOption_SSRR(IPOption_LSRR):
name = "IP Option Strict Source and Record Route"
option = 9
class IPOption_Stream_Id(IPOption):
name = "IP Option Stream ID"
option = 8
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("security",0), ]
class IPOption_MTU_Probe(IPOption):
name = "IP Option MTU Probe"
option = 11
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("mtu",0), ]
class IPOption_MTU_Reply(IPOption_MTU_Probe):
name = "IP Option MTU Reply"
option = 12
class IPOption_Traceroute(IPOption):
name = "IP Option Traceroute"
copy_flag = 1
option = 18
fields_desc = [ _IPOption_HDR,
ByteField("length", 12),
ShortField("id",0),
ShortField("outbound_hops",0),
ShortField("return_hops",0),
IPField("originator_ip","0.0.0.0") ]
class IPOption_Address_Extension(IPOption):
name = "IP Option Address Extension"
copy_flag = 1
option = 19
fields_desc = [ _IPOption_HDR,
ByteField("length", 10),
IPField("src_ext","0.0.0.0"),
IPField("dst_ext","0.0.0.0") ]
class IPOption_Router_Alert(IPOption):
name = "IP Option Router Alert"
copy_flag = 1
option = 20
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortEnumField("alert",0, {0:"router_shall_examine_packet"}), ]
class IPOption_SDBM(IPOption):
name = "IP Option Selective Directed Broadcast Mode"
copy_flag = 1
option = 21
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="addresses", adjust=lambda pkt,l:l+2),
FieldListField("addresses",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-2)
]
TCPOptions = (
{ 0 : ("EOL",None),
1 : ("NOP",None),
2 : ("MSS","!H"),
3 : ("WScale","!B"),
4 : ("SAckOK",None),
5 : ("SAck","!"),
8 : ("Timestamp","!II"),
14 : ("AltChkSum","!BH"),
15 : ("AltChkSumOpt",None),
25 : ("Mood","!p")
},
{ "EOL":0,
"NOP":1,
"MSS":2,
"WScale":3,
"SAckOK":4,
"SAck":5,
"Timestamp":8,
"AltChkSum":14,
"AltChkSumOpt":15,
"Mood":25
} )
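# Illustrative note: a TCP options list is typically written as, e.g.,
#   TCP(options=[("MSS", 1460), ("NOP", None), ("WScale", 7)])
# where each tuple is (name-or-number, value); names are resolved through the
# reverse map TCPOptions[1] and packed by TCPOptionsField.i2m below.
# (The particular values are only an example.)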
class TCPOptionsField(StrField):
islist=1
def getfield(self, pkt, s):
opsz = (pkt.dataofs-5)*4
if opsz < 0:
warning("bad dataofs (%i). Assuming dataofs=5"%pkt.dataofs)
opsz = 0
return s[opsz:],self.m2i(pkt,s[:opsz])
def m2i(self, pkt, x):
opt = []
while x:
onum = x[0]
if onum == 0:
opt.append(("EOL",None))
x=x[1:]
break
if onum == 1:
opt.append(("NOP",None))
x=x[1:]
continue
olen = x[1]
if olen < 2:
warning("Malformed TCP option (announced length is %i)" % olen)
olen = 2
oval = x[2:olen]
if onum in TCPOptions[0]:
oname, ofmt = TCPOptions[0][onum]
if onum == 5: #SAck
ofmt += "%iI" % (len(oval)//4)
if ofmt and struct.calcsize(ofmt) == len(oval):
oval = struct.unpack(ofmt, oval)
if len(oval) == 1:
oval = oval[0]
opt.append((oname, oval))
else:
opt.append((onum, oval))
x = x[olen:]
return opt
def i2m(self, pkt, x):
opt = b""
for oname,oval in x:
if type(oname) is str:
if oname == "NOP":
opt += b"\x01"
continue
elif oname == "EOL":
opt += b"\x00"
continue
elif oname in TCPOptions[1]:
onum = TCPOptions[1][oname]
ofmt = TCPOptions[0][onum][1]
if onum == 5: #SAck
ofmt += "%iI" % len(oval)
if ofmt is not None and (type(oval) is not str or "s" in ofmt):
if type(oval) is not tuple:
oval = (oval,)
oval = struct.pack(ofmt, *oval)
else:
warning("option [%s] unknown. Skipped."%oname)
continue
else:
onum = oname
if type(oval) is not str:
warning("option [%i] is not string."%onum)
continue
opt += bytes([(onum), (2+len(oval))]) + oval
return opt+b"\x00"*(3-((len(opt)+3)%4))
def randval(self):
return [] # XXX
class ICMPTimeStampField(IntField):
re_hmsm = re.compile("([0-2]?[0-9])[Hh:](([0-5]?[0-9])([Mm:]([0-5]?[0-9])([sS:.]([0-9]{0,3}))?)?)?$")
def i2repr(self, pkt, val):
if val is None:
return "--"
else:
sec, milli = divmod(val, 1000)
min, sec = divmod(sec, 60)
hour, min = divmod(min, 60)
return "%d:%d:%d.%d" %(hour, min, sec, int(milli))
def any2i(self, pkt, val):
if type(val) is str:
hmsms = self.re_hmsm.match(val)
if hmsms:
h,_,m,_,s,_,ms = hmsms = hmsms.groups()
ms = int(((ms or "")+"000")[:3])
val = ((int(h)*60+int(m or 0))*60+int(s or 0))*1000+ms
else:
val = 0
elif val is None:
val = int((time.time()%(24*60*60))*1000)
return val
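# Worked example for ICMPTimeStampField: the wire value is milliseconds since
# midnight, so 45296789 renders as "12:34:56.789" via i2repr, and the string
# "12:34:56.789" parses back to 45296789 through any2i.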
class IP(Packet, IPTools):
name = "IP"
fields_desc = [ BitField("version" , 4 , 4),
BitField("ihl", None, 4),
XByteField("tos", 0),
ShortField("len", None),
ShortField("id", 1),
FlagsField("flags", 0, 3, ["MF","DF","evil"]),
BitField("frag", 0, 13),
ByteField("ttl", 64),
ByteEnumField("proto", 0, IP_PROTOS),
XShortField("chksum", None),
#IPField("src", "127.0.0.1"),
#Emph(SourceIPField("src","dst")),
#Emph(IPField("dst", "127.0.0.1")),
Emph(IPField("src", "16.0.0.1")),
Emph(IPField("dst", "48.0.0.1")),
PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
def post_build(self, p, pay):
ihl = self.ihl
p += b"\0"*((-len(p))%4) # pad IP options if needed
if ihl is None:
ihl = len(p)//4
p = bytes([((self.version&0xf)<<4) | ihl&0x0f])+p[1:]
if self.len is None:
l = len(p)+len(pay)
p = p[:2]+struct.pack("!H", l)+p[4:]
if self.chksum is None:
ck = checksum(p)
p = p[:10]+bytes([ck>>8])+bytes([ck&0xff])+p[12:]
return p+pay
def extract_padding(self, s):
l = self.len - (self.ihl << 2)
return s[:l],s[l:]
def send(self, s, slp=0):
for p in self:
try:
s.sendto(bytes(p), (p.dst,0))
except socket.error as msg:
log_runtime.error(msg)
if slp:
time.sleep(slp)
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = next(iter(dst))
return conf.route.route(dst)
def hashret(self):
if ( (self.proto == socket.IPPROTO_ICMP)
and (isinstance(self.payload, ICMP))
and (self.payload.type in [3,4,5,11,12]) ):
return self.payload.payload.hashret()
else:
if conf.checkIPsrc and conf.checkIPaddr:
return strxor(inet_aton(self.src),inet_aton(self.dst))+struct.pack("B",self.proto)+self.payload.hashret()
else:
return struct.pack("B", self.proto)+self.payload.hashret()
def answers(self, other):
if not isinstance(other,IP):
return 0
if conf.checkIPaddr and (self.dst != other.src):
return 0
if ( (self.proto == socket.IPPROTO_ICMP) and
(isinstance(self.payload, ICMP)) and
(self.payload.type in [3,4,5,11,12]) ):
# ICMP error message
return self.payload.payload.answers(other)
else:
if ( (conf.checkIPaddr and (self.src != other.dst)) or
(self.proto != other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
s = self.sprintf("%IP.src% > %IP.dst% %IP.proto%")
if self.frag:
s += " frag:%i" % self.frag
return s
def fragment(self, fragsize=1480):
"""Fragment IP datagrams"""
fragsize = (fragsize+7)//8*8
lst = []
fnb = 0
fl = self
while fl.underlayer is not None:
fnb += 1
fl = fl.underlayer
for p in fl:
s = bytes(p[fnb].payload)
nb = (len(s)+fragsize-1)//fragsize
for i in range(nb):
q = p.copy()
del(q[fnb].payload)
del(q[fnb].chksum)
del(q[fnb].len)
if i == nb-1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i*fragsize//8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
class TCP(Packet):
name = "TCP"
fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
ShortEnumField("dport", 80, TCP_SERVICES),
IntField("seq", 0),
IntField("ack", 0),
BitField("dataofs", None, 4),
BitField("reserved", 0, 4),
FlagsField("flags", 0x2, 8, "FSRPAUEC"),
ShortField("window", 8192),
XShortField("chksum", None),
ShortField("urgptr", 0),
TCPOptionsField("options", {}) ]
def post_build(self, p, pay):
p += pay
dataofs = self.dataofs
if dataofs is None:
dataofs = 5+((len(self.get_field("options").i2m(self,self.options))+3)//4)
p = p[:12]+bytes([(dataofs << 4) | (p[12])&0x0f])+p[13:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
ln = self.underlayer.len-20
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck=checksum(psdhdr+p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_TCP, self.underlayer, p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def hashret(self):
if conf.checkIPsrc:
return struct.pack("H",self.sport ^ self.dport)+self.payload.hashret()
else:
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.dport) and
(self.dport == other.sport)):
return 0
if (abs(other.seq-self.ack) > 2+len(other.payload)):
return 0
return 1
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("TCP %IP.src%:%TCP.sport% > %IP.dst%:%TCP.dport% %TCP.flags%")
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("TCP %IPv6.src%:%TCP.sport% > %IPv6.dst%:%TCP.dport% %TCP.flags%")
else:
return self.sprintf("TCP %TCP.sport% > %TCP.dport% %TCP.flags%")
class UDP(Packet):
name = "UDP"
fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
ShortEnumField("dport", 53, UDP_SERVICES),
ShortField("len", None),
XShortField("chksum", None), ]
def post_build(self, p, pay):
p += pay
l = self.len
if l is None:
l = len(p)
p = p[:4]+struct.pack("!H",l)+p[6:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
ln = self.underlayer.len-20
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck=checksum(psdhdr+p)
p = p[:6]+struct.pack("!H", ck)+p[8:]
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_UDP, self.underlayer, p)
p = p[:6]+struct.pack("!H", ck)+p[8:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def extract_padding(self, s):
l = self.len - 8
return s[:l],s[l:]
def hashret(self):
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if self.dport != other.sport:
return 0
return self.payload.answers(other.payload)
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("UDP %IP.src%:%UDP.sport% > %IP.dst%:%UDP.dport%")
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("UDP %IPv6.src%:%UDP.sport% > %IPv6.dst%:%UDP.dport%")
else:
return self.sprintf("UDP %UDP.sport% > %UDP.dport%")
icmptypes = { 0 : "echo-reply",
3 : "dest-unreach",
4 : "source-quench",
5 : "redirect",
8 : "echo-request",
9 : "router-advertisement",
10 : "router-solicitation",
11 : "time-exceeded",
12 : "parameter-problem",
13 : "timestamp-request",
14 : "timestamp-reply",
15 : "information-request",
16 : "information-response",
17 : "address-mask-request",
18 : "address-mask-reply" }
icmpcodes = { 3 : { 0 : "network-unreachable",
1 : "host-unreachable",
2 : "protocol-unreachable",
3 : "port-unreachable",
4 : "fragmentation-needed",
5 : "source-route-failed",
6 : "network-unknown",
7 : "host-unknown",
9 : "network-prohibited",
10 : "host-prohibited",
11 : "TOS-network-unreachable",
12 : "TOS-host-unreachable",
13 : "communication-prohibited",
14 : "host-precedence-violation",
15 : "precedence-cutoff", },
5 : { 0 : "network-redirect",
1 : "host-redirect",
2 : "TOS-network-redirect",
3 : "TOS-host-redirect", },
11 : { 0 : "ttl-zero-during-transit",
1 : "ttl-zero-during-reassembly", },
12 : { 0 : "ip-header-bad",
1 : "required-option-missing", }, }
class ICMP(Packet):
name = "ICMP"
fields_desc = [ ByteEnumField("type",8, icmptypes),
MultiEnumField("code",0, icmpcodes, depends_on=lambda pkt:pkt.type,fmt="B"),
XShortField("chksum", None),
ConditionalField(XShortField("id",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(XShortField("seq",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(ICMPTimeStampField("ts_ori", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_rx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_tx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(IPField("gw","0.0.0.0"), lambda pkt:pkt.type==5),
ConditionalField(ByteField("ptr",0), lambda pkt:pkt.type==12),
ConditionalField(X3BytesField("reserved",0), lambda pkt:pkt.type==12),
ConditionalField(IPField("addr_mask","0.0.0.0"), lambda pkt:pkt.type in [17,18]),
ConditionalField(IntField("unused",0), lambda pkt:pkt.type not in [0,5,8,12,13,14,15,16,17,18]),
]
def post_build(self, p, pay):
p += pay
if self.chksum is None:
ck = checksum(p)
p = p[:2]+bytes([ck>>8, ck&0xff])+p[4:]
return p
def hashret(self):
if self.type in [0,8,13,14,15,16,17,18]:
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
return self.payload.hashret()
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if ( (other.type,self.type) in [(8,0),(13,14),(15,16),(17,18)] and
self.id == other.id and
self.seq == other.seq ):
return 1
return 0
def guess_payload_class(self, payload):
if self.type in [3,4,5,11,12]:
return IPerror
else:
return Raw
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("ICMP %IP.src% > %IP.dst% %ICMP.type% %ICMP.code%")
else:
return self.sprintf("ICMP %ICMP.type% %ICMP.code%")
class IPerror(IP):
name = "IP in ICMP"
def answers(self, other):
if not isinstance(other, IP):
return 0
if not ( ((conf.checkIPsrc == 0) or (self.dst == other.dst)) and
(self.src == other.src) and
( ((conf.checkIPID == 0)
or (self.id == other.id)
or (conf.checkIPID == 1 and self.id == socket.htons(other.id)))) and
(self.proto == other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
return Packet.mysummary(self)
class TCPerror(TCP):
fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
ShortEnumField("dport", 80, TCP_SERVICES),
IntField("seq", 0) ]
name = "TCP in ICMP"
def post_build(self, p, pay):
p += pay
return p
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
if conf.check_TCPerror_seqack:
if self.seq is not None:
if self.seq != other.seq:
return 0
if self.ack is not None:
if self.ack != other.ack:
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class UDPerror(UDP):
name = "UDP in ICMP"
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class ICMPerror(ICMP):
name = "ICMP in ICMP"
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
if self.code in [0,8,13,14,17,18]:
if (self.id == other.id and
self.seq == other.seq):
return 1
else:
return 0
else:
return 1
def mysummary(self):
return Packet.mysummary(self)
bind_layers( Ether, IP, type=2048)
bind_layers( CookedLinux, IP, proto=2048)
bind_layers( GRE, IP, proto=2048)
bind_layers( SNAP, IP, code=2048)
bind_layers( IPerror, IPerror, frag=0, proto=4)
bind_layers( IPerror, ICMPerror, frag=0, proto=1)
bind_layers( IPerror, TCPerror, frag=0, proto=6)
bind_layers( IPerror, UDPerror, frag=0, proto=17)
bind_layers( IP, IP, frag=0, proto=4)
bind_layers( IP, ICMP, frag=0, proto=1)
bind_layers( IP, TCP, frag=0, proto=6)
bind_layers( IP, UDP, frag=0, proto=17)
bind_layers( IP, GRE, frag=0, proto=47)
conf.l2types.register(101, IP)
conf.l2types.register_num2layer(12, IP)
conf.l3types.register(ETH_P_IP, IP)
conf.l3types.register_num2layer(ETH_P_ALL, IP)
conf.neighbor.register_l3(Ether, IP, lambda l2,l3: getmacbyip(l3.dst))
conf.neighbor.register_l3(Dot3, IP, lambda l2,l3: getmacbyip(l3.dst))
###################
## Fragmentation ##
###################
@conf.commands.register
def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
fragsize = (fragsize+7)//8*8
lst = []
for p in pkt:
s = bytes(p[IP].payload)
nb = (len(s)+fragsize-1)//fragsize
for i in range(nb):
q = p.copy()
del(q[IP].payload)
del(q[IP].chksum)
del(q[IP].len)
if i == nb-1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i*fragsize//8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
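# Hedged usage sketch (address and payload size are made up): something like
#   frags = fragment(IP(dst="10.0.0.1")/UDP()/("X" * 3000), fragsize=1480)
# should yield a list of IP packets whose payloads are at most 1480 bytes,
# with the MF flag set on every fragment except the last.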
def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
if overlap_fragsize is None:
overlap_fragsize = fragsize
q = p.copy()
del(q[IP].payload)
q[IP].add_payload(overlap)
qfrag = fragment(q, overlap_fragsize)
qfrag[-1][IP].flags |= 1
return qfrag+fragment(p, fragsize)
@conf.commands.register
def defrag(plist):
"""defrag(plist) -> ([not fragmented], [defragmented],
[ [bad fragments], [bad fragments], ... ])"""
frags = defaultdict(PacketList)
nofrag = PacketList()
for p in plist:
ip = p[IP]
if IP not in p:
nofrag.append(p)
continue
if ip.frag == 0 and ip.flags & 1 == 0:
nofrag.append(p)
continue
uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
defrag = []
missfrag = []
for lst in frags.values():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag.append(lst)
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag.append(lst)
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
defrag.append(p)
defrag2=PacketList()
for p in defrag:
defrag2.append(p.__class__(bytes(p)))
return nofrag,defrag2,missfrag
@conf.commands.register
def defragment(plist):
"""defragment(plist) -> plist defragmented as much as possible """
frags = defaultdict(lambda:[])
final = []
pos = 0
for p in plist:
p._defrag_pos = pos
pos += 1
if IP in p:
ip = p[IP]
if ip.frag != 0 or ip.flags & 1:
ip = p[IP]
uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
continue
final.append(p)
defrag = []
missfrag = []
for lst in frags.values():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag += lst
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag += lst
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
p._defrag_pos = max(x._defrag_pos for x in lst)
defrag.append(p)
defrag2=[]
for p in defrag:
q = p.__class__(bytes(p))
q._defrag_pos = p._defrag_pos
defrag2.append(q)
final += defrag2
final += missfrag
final.sort(key=lambda x: x._defrag_pos)
for p in final:
del(p._defrag_pos)
if hasattr(plist, "listname"):
name = "Defragmented %s" % plist.listname
else:
name = "Defragmented"
return PacketList(final, name=name)
### Add timeskew_graph() method to PacketList
def _packetlist_timeskew_graph(self, ip, **kargs):
"""Tries to graph the timeskew between the timestamps and real time for a given ip"""
res = map(lambda x: self._elt2pkt(x), self.res)
b = filter(lambda x:x.haslayer(IP) and x.getlayer(IP).src == ip and x.haslayer(TCP), res)
c = []
for p in b:
opts = p.getlayer(TCP).options
for o in opts:
if o[0] == "Timestamp":
c.append((p.time,o[1][0]))
if not c:
warning("No timestamps found in packet list")
return
#d = map(lambda (x,y): (x%2000,((x-c[0][0])-((y-c[0][1])/1000.0))),c)
d = map(lambda a: (a[0]%2000,((a[0]-c[0][0])-((a[1]-c[0][1])/1000.0))),c)
return plt.plot(d, **kargs)
#PacketList.timeskew_graph = types.MethodType(_packetlist_timeskew_graph, None)
### Create a new packet list
class TracerouteResult(SndRcvList):
def __init__(self, res=None, name="Traceroute", stats=None):
PacketList.__init__(self, res, name, stats, vector_index = 1)
self.graphdef = None
self.graphASres = 0
self.padding = 0
self.hloc = None
self.nloc = None
def show(self):
#return self.make_table(lambda (s,r): (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
return self.make_table(lambda s,r: (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
s.ttl,
r.sprintf("%-15s,IP.src% {TCP:%TCP.flags%}{ICMP:%ir,ICMP.type%}")))
def get_trace(self):
raw_trace = {}
for s,r in self.res:
if IP not in s:
continue
d = s[IP].dst
if d not in raw_trace:
raw_trace[d] = {}
raw_trace[d][s[IP].ttl] = r[IP].src, ICMP not in r
trace = {}
for k in raw_trace.keys():
m = [ x for x in raw_trace[k].keys() if raw_trace[k][x][1] ]
if not m:
trace[k] = raw_trace[k]
else:
m = min(m)
trace[k] = {i: raw_trace[k][i] for i in raw_trace[k].keys() if not raw_trace[k][i][1] or i<=m}
return trace
def trace3D(self):
"""Give a 3D representation of the traceroute.
right button: rotate the scene
middle button: zoom
left button: move the scene
left button on a ball: toggle IP displaying
ctrl-left button on a ball: scan ports 21,22,23,25,80 and 443 and display the result"""
trace = self.get_trace()
import visual
class IPsphere(visual.sphere):
def __init__(self, ip, **kargs):
visual.sphere.__init__(self, **kargs)
self.ip=ip
self.label=None
self.setlabel(self.ip)
def setlabel(self, txt,visible=None):
if self.label is not None:
if visible is None:
visible = self.label.visible
self.label.visible = 0
elif visible is None:
visible=0
self.label=visual.label(text=txt, pos=self.pos, space=self.radius, xoffset=10, yoffset=20, visible=visible)
def action(self):
self.label.visible ^= 1
visual.scene = visual.display()
visual.scene.exit = True
start = visual.box()
rings={}
tr3d = {}
for i in trace:
tr = trace[i]
tr3d[i] = []
ttl = tr.keys()
for t in range(1,max(ttl)+1):
if t not in rings:
rings[t] = []
if t in tr:
if tr[t] not in rings[t]:
rings[t].append(tr[t])
tr3d[i].append(rings[t].index(tr[t]))
else:
rings[t].append(("unk",-1))
tr3d[i].append(len(rings[t])-1)
for t in rings:
r = rings[t]
l = len(r)
for i in range(l):
if r[i][1] == -1:
col = (0.75,0.75,0.75)
elif r[i][1]:
col = visual.color.green
else:
col = visual.color.blue
s = IPsphere(pos=((l-1)*visual.cos(2*i*visual.pi/l),(l-1)*visual.sin(2*i*visual.pi/l),2*t),
ip = r[i][0],
color = col)
for trlst in tr3d.values():
if t <= len(trlst):
if trlst[t-1] == i:
trlst[t-1] = s
forecol = colgen(0.625, 0.4375, 0.25, 0.125)
for trlst in tr3d.values():
col = next(forecol)
start = (0,0,0)
for ip in trlst:
visual.cylinder(pos=start,axis=ip.pos-start,color=col,radius=0.2)
start = ip.pos
movcenter=None
while 1:
visual.rate(50)
if visual.scene.kb.keys:
k = visual.scene.kb.getkey()
if k == "esc" or k == "q":
break
if visual.scene.mouse.events:
ev = visual.scene.mouse.getevent()
if ev.press == "left":
o = ev.pick
if o:
if ev.ctrl:
if o.ip == "unk":
continue
savcolor = o.color
o.color = (1,0,0)
a,b=sr(IP(dst=o.ip)/TCP(dport=[21,22,23,25,80,443]),timeout=2)
o.color = savcolor
if len(a) == 0:
txt = "%s:\nno results" % o.ip
else:
txt = "%s:\n" % o.ip
for s,r in a:
txt += r.sprintf("{TCP:%IP.src%:%TCP.sport% %TCP.flags%}{TCPerror:%IPerror.dst%:%TCPerror.dport% %IP.src% %ir,ICMP.type%}\n")
o.setlabel(txt, visible=1)
else:
if hasattr(o, "action"):
o.action()
elif ev.drag == "left":
movcenter = ev.pos
elif ev.drop == "left":
movcenter = None
if movcenter:
visual.scene.center -= visual.scene.mouse.pos-movcenter
movcenter = visual.scene.mouse.pos
## world_trace needs to be reimplemented as gnuplot dependency is removed
# def world_trace(self):
# from modules.geo import locate_ip
# ips = {}
# rt = {}
# ports_done = {}
# for s,r in self.res:
# ips[r.src] = None
# if s.haslayer(TCP) or s.haslayer(UDP):
# trace_id = (s.src,s.dst,s.proto,s.dport)
# elif s.haslayer(ICMP):
# trace_id = (s.src,s.dst,s.proto,s.type)
# else:
# trace_id = (s.src,s.dst,s.proto,0)
# trace = rt.get(trace_id,{})
# if not r.haslayer(ICMP) or r.type != 11:
# if trace_id in ports_done:
# continue
# ports_done[trace_id] = None
# trace[s.ttl] = r.src
# rt[trace_id] = trace
#
# trt = {}
# for trace_id in rt:
# trace = rt[trace_id]
# loctrace = []
# for i in range(max(trace.keys())):
# ip = trace.get(i,None)
# if ip is None:
# continue
# loc = locate_ip(ip)
# if loc is None:
# continue
## loctrace.append((ip,loc)) # no labels yet
# loctrace.append(loc)
# if loctrace:
# trt[trace_id] = loctrace
#
# tr = map(lambda x: Gnuplot.Data(x,with_="lines"), trt.values())
# g = Gnuplot.Gnuplot()
# world = Gnuplot.File(conf.gnuplot_world,with_="lines")
# g.plot(world,*tr)
# return g
def make_graph(self,ASres=None,padding=0):
if ASres is None:
ASres = conf.AS_resolver
self.graphASres = ASres
self.graphpadding = padding
ips = {}
rt = {}
ports = {}
ports_done = {}
for s,r in self.res:
r = r.getlayer(IP) or (conf.ipv6_enabled and r[scapy.layers.inet6.IPv6]) or r
s = s.getlayer(IP) or (conf.ipv6_enabled and s[scapy.layers.inet6.IPv6]) or s
ips[r.src] = None
if TCP in s:
trace_id = (s.src,s.dst,6,s.dport)
elif UDP in s:
trace_id = (s.src,s.dst,17,s.dport)
elif ICMP in s:
trace_id = (s.src,s.dst,1,s.type)
else:
trace_id = (s.src,s.dst,s.proto,0)
trace = rt.get(trace_id,{})
ttl = conf.ipv6_enabled and scapy.layers.inet6.IPv6 in s and s.hlim or s.ttl
if not (ICMP in r and r[ICMP].type == 11) and not (conf.ipv6_enabled and scapy.layers.inet6.IPv6 in r and scapy.layers.inet6.ICMPv6TimeExceeded in r):
if trace_id in ports_done:
continue
ports_done[trace_id] = None
p = ports.get(r.src,[])
if TCP in r:
p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%"))
trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%')
elif UDP in r:
p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%"))
trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%')
elif ICMP in r:
p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%"))
trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
else:
p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}"))
trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}')
ports[r.src] = p
else:
trace[ttl] = r.sprintf('"%r,src%"')
rt[trace_id] = trace
# Fill holes with unk%i nodes
unknown_label = incremental_label("unk%i")
blackholes = []
bhip = {}
for rtk in rt:
trace = rt[rtk]
k = trace.keys()
for n in range(min(k), max(k)):
if not n in trace:
trace[n] = next(unknown_label)
if not rtk in ports_done:
if rtk[2] == 1: #ICMP
bh = "%s %i/icmp" % (rtk[1],rtk[3])
elif rtk[2] == 6: #TCP
bh = "%s %i/tcp" % (rtk[1],rtk[3])
elif rtk[2] == 17: #UDP
bh = '%s %i/udp' % (rtk[1],rtk[3])
else:
bh = '%s %i/proto' % (rtk[1],rtk[2])
ips[bh] = None
bhip[rtk[1]] = bh
bh = '"%s"' % bh
trace[max(k)+1] = bh
blackholes.append(bh)
# Find AS numbers
ASN_query_list = dict.fromkeys(map(lambda x:x.rsplit(" ",1)[0],ips)).keys()
if ASres is None:
ASNlist = []
else:
ASNlist = ASres.resolve(*ASN_query_list)
ASNs = {}
ASDs = {}
for ip,asn,desc, in ASNlist:
if asn is None:
continue
iplist = ASNs.get(asn,[])
if ip in bhip:
if ip in ports:
iplist.append(ip)
iplist.append(bhip[ip])
else:
iplist.append(ip)
ASNs[asn] = iplist
ASDs[asn] = desc
backcolorlist=colgen("60","86","ba","ff")
forecolorlist=colgen("a0","70","40","20")
s = "digraph trace {\n"
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
s += "\n#ASN clustering\n"
for asn in ASNs:
s += '\tsubgraph cluster_%s {\n' % asn
col = next(backcolorlist)
s += '\t\tcolor="#%s%s%s";' % col
s += '\t\tnode [fillcolor="#%s%s%s",style=filled];' % col
s += '\t\tfontsize = 10;'
s += '\t\tlabel = "%s\\n[%s]"\n' % (asn,ASDs[asn])
for ip in ASNs[asn]:
s += '\t\t"%s";\n'%ip
s += "\t}\n"
s += "#endpoints\n"
for p in ports:
s += '\t"%s" [shape=record,color=black,fillcolor=green,style=filled,label="%s|%s"];\n' % (p,p,"|".join(ports[p]))
s += "\n#Blackholes\n"
for bh in blackholes:
s += '\t%s [shape=octagon,color=black,fillcolor=red,style=filled];\n' % bh
if padding:
s += "\n#Padding\n"
pad={}
for snd,rcv in self.res:
if rcv.src not in ports and rcv.haslayer(conf.padding_layer):
p = rcv.getlayer(conf.padding_layer).load
if p != "\x00"*len(p):
pad[rcv.src]=None
for rcv in pad:
s += '\t"%s" [shape=triangle,color=black,fillcolor=red,style=filled];\n' % rcv
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
for rtk in rt:
s += "#---[%s\n" % repr(rtk)
s += '\t\tedge [color="#%s%s%s"];\n' % next(forecolorlist)
trace = rt[rtk]
k = trace.keys()
for n in range(min(k), max(k)):
s += '\t%s ->\n' % trace[n]
s += '\t%s;\n' % trace[max(k)]
s += "}\n";
self.graphdef = s
def graph(self, ASres=None, padding=0, **kargs):
"""x.graph(ASres=conf.AS_resolver, other args):
ASres=None : no AS resolver => no clustering
ASres=AS_resolver() : default whois AS resolver (riswhois.ripe.net)
ASres=AS_resolver_cymru(): use whois.cymru.com whois database
ASres=AS_resolver(server="whois.ra.net")
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
figsize: w,h tuple in inches. See matplotlib documentation
target: filename. If None uses matplotlib to display
prog: which graphviz program to use"""
if ASres is None:
ASres = conf.AS_resolver
if (self.graphdef is None or
self.graphASres != ASres or
self.graphpadding != padding):
self.make_graph(ASres,padding)
return do_graph(self.graphdef, **kargs)
@conf.commands.register
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, filter=None, timeout=2, verbose=None, **kargs):
"""Instant TCP traceroute
traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None
"""
if verbose is None:
verbose = conf.verb
if filter is None:
# we only consider ICMP error packets and TCP packets with at
# least the ACK flag set *and* either the SYN or the RST flag
# set
filter="(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
if l4 is None:
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
# this should always work
filter="ip"
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/l4,
timeout=timeout, filter=filter, verbose=verbose, **kargs)
a = TracerouteResult(a.res)
if verbose:
a.show()
return a,b
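# Illustrative usage sketch (not part of the original module): how traceroute()
# and TracerouteResult.graph() are typically combined. The hostname, port and
# output path are placeholders; the call needs network access and raw-socket
# privileges, so it is wrapped in a function instead of running at import time.
def _traceroute_graph_example(target="www.example.com"):
    res, unans = traceroute(target, dport=443, maxttl=20, verbose=0)
    # graph() clusters hops by AS; 'target' selects an output file and, as the
    # docstring above notes, leaving it as None displays the graph instead.
    res.graph(target="/tmp/traceroute.svg")
    return res, unans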
#############################
## Simple TCP client stack ##
#############################
class TCP_client(Automaton):
def parse_args(self, ip, port, *args, **kargs):
self.dst = next(iter(Net(ip)))
self.dport = port
self.sport = random.randrange(0,2**16)
self.l4 = IP(dst=ip)/TCP(sport=self.sport, dport=self.dport, flags=0,
seq=random.randrange(0,2**32))
self.src = self.l4.src
self.swin=self.l4[TCP].window
self.dwin=1
        self.rcvbuf = b""
bpf = "host %s and host %s and port %i and port %i" % (self.src,
self.dst,
self.sport,
self.dport)
# bpf=None
Automaton.parse_args(self, filter=bpf, **kargs)
def master_filter(self, pkt):
return (IP in pkt and
pkt[IP].src == self.dst and
pkt[IP].dst == self.src and
TCP in pkt and
pkt[TCP].sport == self.dport and
pkt[TCP].dport == self.sport and
self.l4[TCP].seq >= pkt[TCP].ack and # XXX: seq/ack 2^32 wrap up
((self.l4[TCP].ack == 0) or (self.l4[TCP].ack <= pkt[TCP].seq <= self.l4[TCP].ack+self.swin)) )
@ATMT.state(initial=1)
def START(self):
pass
@ATMT.state()
def SYN_SENT(self):
pass
@ATMT.state()
def ESTABLISHED(self):
pass
@ATMT.state()
def LAST_ACK(self):
pass
@ATMT.state(final=1)
def CLOSED(self):
pass
@ATMT.condition(START)
def connect(self):
raise self.SYN_SENT()
@ATMT.action(connect)
def send_syn(self):
self.l4[TCP].flags = "S"
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(SYN_SENT)
def synack_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x12:
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(synack_received)
def send_ack_of_synack(self, pkt):
self.l4[TCP].ack = pkt[TCP].seq+1
self.l4[TCP].flags = "A"
self.send(self.l4)
@ATMT.receive_condition(ESTABLISHED)
def incoming_data_received(self, pkt):
if not isinstance(pkt[TCP].payload, NoPayload) and not isinstance(pkt[TCP].payload, conf.padding_layer):
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(incoming_data_received)
def receive_data(self,pkt):
data = (bytes(pkt[TCP].payload))
if data and self.l4[TCP].ack == pkt[TCP].seq:
self.l4[TCP].ack += len(data)
self.l4[TCP].flags = "A"
self.send(self.l4)
self.rcvbuf += data
if pkt[TCP].flags & 8 != 0: #PUSH
self.oi.tcp.send(self.rcvbuf)
                self.rcvbuf = b""
@ATMT.ioevent(ESTABLISHED,name="tcp", as_supersocket="tcplink")
def outgoing_data_received(self, fd):
raise self.ESTABLISHED().action_parameters(fd.recv())
@ATMT.action(outgoing_data_received)
def send_data(self, d):
self.l4[TCP].flags = "PA"
self.send(self.l4/d)
self.l4[TCP].seq += len(d)
@ATMT.receive_condition(ESTABLISHED)
def reset_received(self, pkt):
if pkt[TCP].flags & 4 != 0:
raise self.CLOSED()
@ATMT.receive_condition(ESTABLISHED)
def fin_received(self, pkt):
if pkt[TCP].flags & 0x1 == 1:
raise self.LAST_ACK().action_parameters(pkt)
@ATMT.action(fin_received)
def send_finack(self, pkt):
self.l4[TCP].flags = "FA"
self.l4[TCP].ack = pkt[TCP].seq+1
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(LAST_ACK)
def ack_of_fin_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x10:
raise self.CLOSED()
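# Illustrative usage sketch (not part of the original module): because the
# ioevent above is registered with as_supersocket="tcplink", the automaton can
# be driven like a stream socket. Host, port and payload are placeholders and
# the example needs network access, so it is kept inside a function.
def _tcp_client_example(host="www.example.com", port=80):
    s = TCP_client.tcplink(conf.raw_layer, host, port)  # hand received bytes back unparsed
    try:
        s.send(b"GET / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
        return s.recv()  # data pushed by the peer via the ESTABLISHED ioevent
    finally:
        s.close()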
#####################
## Reporting stuff ##
#####################
def report_ports(target, ports):
"""portscan a target and output a LaTeX table
report_ports(target, ports) -> string"""
ans,unans = sr(IP(dst=target)/TCP(dport=ports),timeout=5)
rep = "\\begin{tabular}{|r|l|l|}\n\\hline\n"
for s,r in ans:
if not r.haslayer(ICMP):
if r.payload.flags == 0x12:
rep += r.sprintf("%TCP.sport% & open & SA \\\\\n")
rep += "\\hline\n"
for s,r in ans:
if r.haslayer(ICMP):
rep += r.sprintf("%TCPerror.dport% & closed & ICMP type %ICMP.type%/%ICMP.code% from %IP.src% \\\\\n")
elif r.payload.flags != 0x12:
rep += r.sprintf("%TCP.sport% & closed & TCP %TCP.flags% \\\\\n")
rep += "\\hline\n"
for i in unans:
rep += i.sprintf("%TCP.dport% & ? & unanswered \\\\\n")
rep += "\\hline\n\\end{tabular}\n"
return rep
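# Illustrative usage sketch (not part of the original module): report_ports()
# returns the LaTeX tabular as a plain string, so it can be written straight to
# a .tex file. The target address, port list and output path are placeholders.
def _report_ports_example():
    table = report_ports("192.0.2.10", (22, 80, 443))
    with open("/tmp/portscan.tex", "w") as f:
        f.write(table)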
def IPID_count(lst, funcID=lambda x:x[1].id, funcpres=lambda x:x[1].summary()):
    idlst = sorted(map(funcID, lst))
#classes = [idlst[0]]+map(lambda x:x[1],filter(lambda (x,y): abs(x-y)>50, map(lambda x,y: (x,y),idlst[:-1], idlst[1:])))
classes = [idlst[0]]+list(map(lambda x:x[1],filter(lambda a: abs(a[0]-a[1])>50, map(lambda x,y: (x,y),idlst[:-1], idlst[1:]))))
    lst = sorted(map(lambda x:(funcID(x), funcpres(x)), lst))
print("Probably %i classes:" % len(classes), classes)
for id,pr in lst:
print("%5i" % id, pr)
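# Illustrative usage sketch (not part of the original module): IPID_count()
# expects a list of (sent, received) couples such as the one returned by sr().
# The probe below is a placeholder and needs network access, so it stays inside
# a function.
def _ipid_count_example(net="192.0.2.0/28"):
    ans, unans = sr(IP(dst=net)/ICMP(), timeout=2, verbose=0)
    IPID_count(ans)  # prints the estimated IP ID classes and each (id, packet) pair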
def fragleak(target,sport=123, dport=123, timeout=0.2, onlyasc=0):
load = "XXXXYYYYYYYYYY"
# getmacbyip(target)
# pkt = IP(dst=target, id=RandShort(), options="\x22"*40)/UDP()/load
    pkt = IP(dst=target, id=RandShort(), options="\x00"*40, flags=1)/UDP(sport=sport, dport=dport)/load
s=conf.L3socket()
intr=0
found={}
try:
while 1:
try:
if not intr:
s.send(pkt)
sin,sout,serr = select([s],[],[],timeout)
if not sin:
continue
ans=s.recv(1600)
if not isinstance(ans, IP): #TODO: IPv6
continue
if not isinstance(ans.payload, ICMP):
continue
if not isinstance(ans.payload.payload, IPerror):
continue
if ans.payload.payload.dst != target:
continue
if ans.src != target:
print("leak from", ans.src,end=" ")
# print repr(ans)
if not ans.haslayer(conf.padding_layer):
continue
# print repr(ans.payload.payload.payload.payload)
# if not isinstance(ans.payload.payload.payload.payload, conf.raw_layer):
# continue
# leak = ans.payload.payload.payload.payload.load[len(load):]
leak = ans.getlayer(conf.padding_layer).load
if leak not in found:
found[leak]=None
linehexdump(leak, onlyasc=onlyasc)
except KeyboardInterrupt:
if intr:
raise
intr=1
except KeyboardInterrupt:
pass
def fragleak2(target, timeout=0.4, onlyasc=0):
found={}
try:
while 1:
p = sr1(IP(dst=target, options="\x00"*40, proto=200)/"XXXXYYYYYYYYYYYY",timeout=timeout,verbose=0)
if not p:
continue
if conf.padding_layer in p:
leak = p[conf.padding_layer].load
if leak not in found:
found[leak]=None
linehexdump(leak,onlyasc=onlyasc)
except:
pass
conf.stats_classic_protocols += [TCP,UDP,ICMP]
conf.stats_dot11_protocols += [TCP,UDP,ICMP]
if conf.ipv6_enabled:
import scapy.layers.inet6
| apache-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/backend_gtk3.py | 10 | 33857 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import cbook, colors as mcolors, lines, verbose, rcParams
backend_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if Gtk.main_level() == 0:
Gtk.main()
show = Show()
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
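# Illustrative usage sketch (not part of the original module): TimerGTK3
# instances are normally obtained through FigureCanvasGTK3.new_timer() rather
# than constructed directly. The 500 ms interval and the redraw callback are
# arbitrary choices for demonstration.
def _timer_example(canvas):
    # 'canvas' is assumed to be an existing FigureCanvasGTK3 instance.
    timer = canvas.new_timer(interval=500)
    timer.add_callback(canvas.draw_idle)  # ask for a redraw twice per second
    timer.start()
    return timer  # keep a reference so the timer is not garbage collected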
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return True # stop event propagation
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return True # stop event propagation
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
if _debug:
print("FigureCanvasGTK3.%s" % fn_name())
print("size_allocate (%d x %d)" % (allocation.width, allocation.height))
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
self._need_redraw = True
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
self._need_redraw = True
if self.get_visible() and self.get_mapped():
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK3(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The Gtk.Toolbar (gtk only)
vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
window : The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
def add_widget(child, expand, fill, padding):
child.show()
self.vbox.pack_end(child, False, False, 0)
size_request = child.size_request()
return size_request.height
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarGTK3(self.toolmanager)
h += add_widget(self.statusbar, False, False, 0)
h += add_widget(Gtk.HSeparator(), False, False, 0)
if self.toolbar is not None:
self.toolbar.show()
h += add_widget(self.toolbar, False, False, 0)
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
Gtk.main_level() >= 1:
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3 (self.canvas, self.window)
elif rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK3(self.toolmanager)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
        # must be initialised after the toolbar has been set
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas)
else:
toolmanager = None
return toolmanager
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
#self.canvas.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
return filename, self.ext
class RubberbandGTK3(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self.ctx = None
def draw_rubberband(self, x0, y0, x1, y1):
# 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
# Recipe/189744'
self.ctx = self.figure.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.figure.canvas.draw()
height = self.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea = Gtk.Box()
self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
self.pack_start(self._toolarea, False, False, 0)
self._toolarea.show_all()
self._groups = {}
self._toolitems = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
tbutton = Gtk.ToggleToolButton()
else:
tbutton = Gtk.ToolButton()
tbutton.set_label(name)
if image_file is not None:
image = Gtk.Image()
image.set_from_file(image_file)
tbutton.set_icon_widget(image)
if position is None:
position = -1
self._add_button(tbutton, group, position)
signal = tbutton.connect('clicked', self._call_tool, name)
tbutton.set_tooltip_text(description)
tbutton.show_all()
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tbutton, signal))
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
toolbar = Gtk.Toolbar()
toolbar.set_style(Gtk.ToolbarStyle.ICONS)
self._toolarea.pack_start(toolbar, False, False, 0)
toolbar.show_all()
self._groups[group] = toolbar
self._groups[group].insert(button, position)
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event('%s Not in toolbar' % name, self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea.pack_start(sep, False, True, 0)
sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
def __init__(self, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
Gtk.Statusbar.__init__(self)
self._context = self.get_context_id('message')
def set_message(self, s):
self.pop(self._context)
self.push(self._context, s)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.figure.canvas.manager.window,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.figure.canvas.get_supported_filetypes(),
default_filetype=self.figure.canvas.get_default_filetype())
fc.set_current_name(self.figure.canvas.get_default_filename())
return fc
def trigger(self, *args, **kwargs):
chooser = self.get_filechooser()
fname, format_ = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(
rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
self.figure.canvas.print_figure(fname, format=format_)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class SetCursorGTK3(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def init_window(self):
if self.window:
return
self.window = Gtk.Window(title="Subplot Configuration Tool")
try:
            self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.window.connect('destroy', self.destroy)
toolfig = Figure(figsize=(6, 3))
canvas = self.figure.canvas.__class__(toolfig)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
self.window.set_default_size(w, h)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
self.window.show()
def destroy(self, *args):
self.window.destroy()
self.window = None
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def trigger(self, sender, event, data=None):
self.init_window()
self.window.present()
# Define the file to use as the GTk icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3
Toolbar = ToolbarGTK3
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
| bsd-3-clause |
ODM2/ODMToolsPython | odmtools/gui/pnlDataTable.py | 1 | 7512 | import wx
import wx.grid
import logging
import itertools as iter
import pandas as pd
from odmtools.lib.ObjectListView import ColumnDefn, VirtualObjectListView, ObjectListView
from wx.lib.pubsub import pub as Publisher
import numpy as np
import timeit
# from odmtools.common.logger import LoggerTool
#
# tool = LoggerTool()
# logger = tool.setupLogger(__name__, __name__ + '.log', 'w', logging.DEBUG)
logger =logging.getLogger('main')
[wxID_PNLDATATABLE, wxID_PNLDATATABLEDATAGRID,
] = [wx.NewId() for _init_ctrls in range(2)]
class pnlDataTable(wx.Panel):
toggle = iter.cycle([0, 1]).next
def __init__(self, parent):
self.parent = parent
self._init_ctrls()
def _init_ctrls(self):
# generated method, don't edit
wx.Panel.__init__(self, id=wxID_PNLDATATABLE, name=u'pnlDataTable',
parent=self.parent, size=wx.Size(677, 449),
style=wx.TAB_TRAVERSAL)
# self.record_service = self.parent.Parent.getRecordService()
self.myOlv = VirtualObjectListView(self, style=wx.LC_REPORT)
self.myOlv.SetEmptyListMsg("No Series Selected for Editing")
self.currentItem = None
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(self.myOlv, 1, wx.ALL | wx.EXPAND, 4)
self.SetSizer(sizer_2)
self.myOlv.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onItemSelected)
self.myOlv.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.onItemSelected)
self.EnableSorting()
Publisher.subscribe(self.onChangeSelection, "changeTableSelection")
Publisher.subscribe(self.onRefresh, "refreshTable")
Publisher.subscribe(self.onDeselectAll, "deselectAllDataTable")
self.ascending = False
self.enableSelectDataTable = False
self.Layout()
# def toggleBindings(self):
# """ Activates/Deactivates Datatable specific bindings
#
# :param activate:
# :return:
# """
#
# if self.toggle():
# #logger.info("binding activated...")
# try:
# self.myOlv.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onItemSelected, id=self.myOlv.GetId())
# self.myOlv.Bind(wx.EVT_CHAR, self.onKeyPress, id=self.myOlv.GetId())
# self.myOlv.Bind(wx.EVT_LIST_KEY_DOWN, self.onKeyPress, id=self.myOlv.GetId())
# except:
# pass
# else:
# #logger.info("binding deactivated...")
# try:
# self.myOlv.Unbind(wx.EVT_LIST_ITEM_FOCUSED, self.onItemSelected, id=self.myOlv.GetId())
# self.myOlv.Unbind(wx.EVT_CHAR, self.onKeyPress, id=self.myOlv.GetId())
# self.myOlv.Unbind(wx.EVT_LIST_KEY_DOWN, self.onKeyPress, id=self.myOlv.GetId())
# except:
# pass
def init(self, memDB):
self.memDB = memDB
columns = [ColumnDefn(x.strip(), align="left", valueGetter=i, minimumWidth=125, width=125,
stringConverter='%Y-%m-%d %H:%M:%S' if "date" in x.lower() else '%s')
for x, i in self.memDB.getEditColumns()]
self.myOlv.useAlternateBackColors = True
self.myOlv.oddRowsBackColor = wx.Colour(191, 217, 217)
'''values = self.memDB.getDataValues()
value_length = len(values)
self.myOlvDataFrame = pd.DataFrame(values, columns=[x.title for x in columns])
'''
self.myOlv.SetColumns(columns)
self.myOlvDataFrame = self.memDB.getDataValuesDF()
sort_by_index = list(self.myOlvDataFrame.columns).index("LocalDateTime")
self.myOlvDataFrame.sort(self.myOlvDataFrame.columns[sort_by_index], inplace=True)
self.dataObjects = self.myOlvDataFrame.values.tolist()
self.myOlv.SetObjectGetter(self.objectGetter)
self.myOlv.SetItemCount(len(self.myOlvDataFrame))
def EnableSorting(self):
self.myOlv.Bind(wx.EVT_LIST_COL_CLICK, self.onColSelected)
self.sortedColumnIndex = -1
if not self.myOlv.smallImageList:
self.myOlv.SetImageLists()
if (not self.myOlv.smallImageList.HasName(ObjectListView.NAME_DOWN_IMAGE) and
self.myOlv.smallImageList.GetSize(0) == (16, 16)):
self.myOlv.RegisterSortIndicators()
def objectGetter(self, index):
"""
A Virtual list has to have a callable installed that says which model object is shown
at a given index
"""
return self.dataObjects[index % len(self.dataObjects)]
def onColSelected(self, evt):
"""
Allows users to sort by clicking on columns
"""
logger.debug("Column: %s" % evt.m_col)
self.sortColumn(evt.m_col)
def sortColumn(self, selected_column):
oldSortColumnIndex = self.sortedColumnIndex
self.sortedColumnIndex = selected_column
ascending = self.myOlv.sortAscending
if ascending:
self.myOlvDataFrame.sort(self.myOlvDataFrame.columns[selected_column], inplace=True)
self.myOlv.sortAscending = False
elif not ascending:
self.myOlvDataFrame.sort(self.myOlvDataFrame.columns[selected_column], ascending=False, inplace=True)
self.myOlv.sortAscending = True
self.myOlv._UpdateColumnSortIndicators(selected_column, oldSortColumnIndex)
self.dataObjects = self.myOlvDataFrame.values.tolist()
        if self.myOlv.GetItemCount():
itemFrom = self.myOlv.GetTopItem()
itemTo = self.myOlv.GetTopItem()+1 + self.myOlv.GetCountPerPage()
itemTo = min(itemTo, self.myOlv.GetItemCount()-1)
self.myOlv.RefreshItems(itemFrom, itemTo)
def onRefresh(self, e):
self.myOlvDataFrame = self.memDB.getDataValuesDF()
self.dataObjects = self.myOlvDataFrame.values.tolist()
# self.myOlv.RefreshItems()
def clear(self):
self.memDB = None
self.myOlv.DeleteAllItems()
self.myOlvDataFrame = None
self.dataObjects = None
def onItemSelected(self, event):
"""
Disable selecting of an item in the DataTable, only sorting is available
"""
if not self.enableSelectDataTable:
self.myOlv.SetItemState(event.m_itemIndex, 0, wx.LIST_STATE_SELECTED)
def onDeselectAll(self):
selected_item = self.myOlv.GetFirstSelected()
while selected_item != -1:
self.myOlv.SetItemState(selected_item, 0, wx.LIST_STATE_SELECTED)
selected_item = self.myOlv.GetNextSelected(selected_item)
def onChangeSelection(self, datetime_list=[]):
"""
        Select the rows whose LocalDateTime values are contained in datetime_list
"""
self.onDeselectAll()
if isinstance(datetime_list, pd.DataFrame):
try:
self.enableSelectDataTable = True
olv = self.myOlvDataFrame.set_index("LocalDateTime")
filtered_dataframe = self.myOlvDataFrame[olv.index.isin(datetime_list.index)]
results = np.where(self.myOlvDataFrame.index.isin(filtered_dataframe.index))
for i in results[0]:
self.myOlv.SetItemState(i, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
self.myOlv.Focus(results[0][0])
self.enableSelectDataTable = False
except:
pass
def onKeyPress(self, evt):
"""Ignores Keypresses"""
pass
def stopEdit(self):
self.clear()
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log-determinant of the robust covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations, computed with the
        robust estimates of location and precision.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
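# Illustrative sketch (not part of scikit-learn): a single c_step run on
# synthetic Gaussian data. The sample size and support size are arbitrary; note
# that c_step returns five values, the third being the log-determinant of the
# robust covariance estimate.
def _c_step_example():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    location, covariance, log_det, support, dist = c_step(
        X, n_support=120, random_state=rng)
    return location, covariance, support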
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
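# Illustrative sketch (not part of scikit-learn): keep the two best
# (location, covariance) candidates out of ten random initial supports.
# All sizes below are arbitrary.
def _select_candidates_example():
    rng = np.random.RandomState(42)
    X = rng.randn(300, 4)
    locations, covariances, supports, dists = select_candidates(
        X, n_support=180, n_trials=10, select=2, n_iter=5, random_state=rng)
    return locations, covariances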
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): keep only 2 candidates per subset so that
            # the candidate arrays fit in memory.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
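# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes the module-level numpy import and the default keyword
# arguments of fast_mcd defined above; data and contamination are arbitrary.
def _fast_mcd_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)
    X[:10] += 10.  # shift a few observations so they act as gross outliers
    location, covariance, support, dist = fast_mcd(X, random_state=0)
    # `support` flags the observations kept for the raw robust estimates;
    # most of the shifted points should be excluded from it.
    return location, covariance, support, dist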
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        (n_samples + n_features + 1) / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
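# Illustrative usage sketch (added for clarity; not part of the original
# module). Only attributes documented above (location_, covariance_,
# support_, dist_) are used; the data and contamination are arbitrary.
def _min_cov_det_usage_sketch():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 2)
    X[:15] += np.array([8., -8.])  # contaminate a small fraction of the rows
    mcd = MinCovDet(random_state=0).fit(X)
    # The robust location should stay near the origin despite the outliers,
    # which should mostly be excluded from the support mask.
    return mcd.location_, mcd.covariance_, mcd.support_, mcd.dist_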
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/axes_zoom_effect.py | 9 | 3291 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
#loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
"""
    ax1 : the main axes
    ax2 : the zoomed axes
    (xmin, xmax) : the limits of the colored area in both plot axes.
    Connect ax1 and ax2. The x-range (xmin, xmax) in both axes will
    be marked. The keyword parameters will be used to create
    patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
"""
ax1 : the main axes
    ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5,5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| mit |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/factorization/python/ops/kmeans.py | 4 | 10399 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of learn (aka skflow) API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.python.ops.control_flow_ops import with_dependencies
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# TODO(agarwal,ands): support sharded input.
# TODO(agarwal,ands): enable stopping criteria based on improvements to cost.
# TODO(agarwal,ands): support random restarts.
class KMeansClustering(estimator.Estimator,
TransformerMixin):
"""K-Means clustering."""
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=clustering_ops.RANDOM_INIT,
distance_metric=clustering_ops.SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
kmeans_plus_plus_num_retries=2,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
config: See Estimator
"""
super(KMeansClustering, self).__init__(
model_dir=model_dir,
config=config)
self.kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._num_clusters = num_clusters
self._training_initial_clusters = initial_clusters
self._training_graph = None
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._random_seed = random_seed
self._initialized = False
# pylint: disable=protected-access
class _StopWhenConverged(BaseMonitor):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes a '_StopWhenConverged' monitor.
Args:
tolerance: A relative tolerance of change between iterations.
"""
super(KMeansClustering._StopWhenConverged, self).__init__()
self._tolerance = tolerance
def begin(self, max_steps):
super(KMeansClustering._StopWhenConverged, self).begin(max_steps)
self._prev_loss = None
def step_begin(self, step):
super(KMeansClustering._StopWhenConverged, self).step_begin(step)
return [self._estimator._loss]
def step_end(self, step, output):
super(KMeansClustering._StopWhenConverged, self).step_end(step, output)
loss = output[self._estimator._loss]
if self._prev_loss is None:
self._prev_loss = loss
return False
relative_change = (abs(loss - self._prev_loss)
/ (1 + abs(self._prev_loss)))
self._prev_loss = loss
return relative_change < self._tolerance
# pylint: enable=protected-access
def fit(self, x, y=None, monitors=None, logdir=None, steps=None, batch_size=128,
relative_tolerance=None):
"""Trains a k-means clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: Monitor object to print training progress and invoke early
stopping
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
batch_size: mini-batch size to use. Requires `use_mini_batch=True`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
Returns:
Returns self.
"""
assert y is None
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, batch_size if self._use_mini_batch else None)
if relative_tolerance is not None:
if monitors is not None:
monitors += [self._StopWhenConverged(relative_tolerance)]
else:
monitors = [self._StopWhenConverged(relative_tolerance)]
# Make sure that we will eventually terminate.
assert ((monitors is not None and len(monitors)) or (steps is not None)
or (self.steps is not None))
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return super(KMeansClustering, self).predict(
x=x, batch_size=batch_size)[KMeansClustering.CLUSTER_IDX]
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(x=x, batch_size=batch_size)[KMeansClustering.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return super(KMeansClustering, self).predict(
x=x, batch_size=batch_size)[KMeansClustering.ALL_SCORES]
def clusters(self):
"""Returns cluster centers."""
return checkpoints.load_variable(self.model_dir, self.CLUSTERS)
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = clustering_ops.KMeans(
features,
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
self._loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], self._loss)
return training_op, self._loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = clustering_ops.KMeans(
features,
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
return {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0]
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = clustering_ops.KMeans(
features,
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
return {
KMeansClustering.SCORES: tf.reduce_sum(losses),
}
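# Illustrative usage sketch (added for clarity; not part of the original
# module). Shapes, cluster count and step count are arbitrary; only methods
# defined above (fit, predict, clusters, score) are exercised.
def _kmeans_clustering_usage_sketch():
  points = np.random.rand(1000, 2).astype(np.float32)
  est = KMeansClustering(num_clusters=3, use_mini_batch=True)
  est.fit(points, steps=100, batch_size=128)
  assignments = est.predict(points)   # cluster index for each input row
  centers = est.clusters()            # array of shape (3, 2)
  total_distance = est.score(points)  # sum of distances to nearest centers
  return assignments, centers, total_distance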
| apache-2.0 |
vivekmishra1991/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/utils/extmath.py | 4 | 21403 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
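# Illustrative usage sketch (added for clarity; not part of the original
# module): a truncated factorization of a random matrix, relying only on the
# signature defined above and the module-level numpy import.
def _randomized_svd_usage_sketch():
    rng = np.random.RandomState(0)
    M = rng.randn(100, 40)
    U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
    # U has shape (100, 5), s has shape (5,), V has shape (5, 40), and
    # np.dot(U * s, V) approximates the best rank-5 reconstruction of M.
    return U.shape, s.shape, V.shape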
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
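# Illustrative usage sketch (added for clarity; not part of the original
# module): svd_flip fixes the signs of the singular vectors without changing
# the reconstruction, making SVD-based components deterministic across runs.
def _svd_flip_usage_sketch():
    rng = np.random.RandomState(0)
    A = rng.randn(6, 4)
    U, s, V = linalg.svd(A, full_matrices=False)
    U_flipped, V_flipped = svd_flip(U, V)
    # The product is unchanged and the largest-magnitude loading in each
    # column of U_flipped is now positive.
    return np.allclose(np.dot(U_flipped * s, V_flipped), A)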
@deprecated('to be removed in 0.17; use scipy.special.expit or log_logistic')
def logistic_sigmoid(X, log=False, out=None):
"""Logistic function, ``1 / (1 + e ** (-x))``, or its log."""
from .fixes import expit
fn = log_logistic if log else expit
return fn(X, out)
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
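# Illustrative usage sketch (added for clarity; not part of the original
# module): the split formulation above stays finite for large negative inputs,
# where the naive log(1 / (1 + exp(-x))) underflows to -inf.
def _log_logistic_usage_sketch():
    x = np.array([[-1000., 0., 35.]])
    stable = log_logistic(x)  # approximately [[-1000., -0.693, 0.]]
    with np.errstate(over='ignore', divide='ignore'):
        naive = np.log(1. / (1. + np.exp(-x)))  # approximately [[-inf, -0.693, 0.]]
    return stable, naive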
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    analysis and recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
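# Illustrative usage sketch (added for clarity; not part of the original
# module): updating running statistics with a second batch reproduces the mean
# and variance of the concatenated data, which is what incremental estimators
# rely on to avoid keeping all samples in memory.
def _batch_mean_variance_update_sketch():
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(40, 3), rng.randn(60, 3)
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
    X_full = np.vstack((X1, X2))
    # Expected: count == 100 and both allclose checks hold.
    return (count, np.allclose(mean, X_full.mean(axis=0)),
            np.allclose(var, X_full.var(axis=0)))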
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
luo66/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | examples/text/plot_hashing_vs_dict_vectorizer.py | 23 | 3253 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
# categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data, _ = fetch_20newsgroups(subset='train', categories=categories,
return_X_y=True)
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
franalli/CS221 | NN.py | 1 | 5251 | import glob
import os
import numpy as np
import collections
import matplotlib.pyplot as plt
from IPython import embed
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
import torch
import random
from torch.autograd import Variable
import torch.nn.functional
import torch.nn as nn
import cPickle as pickle
print 'Loading Dataset'
train_data = np.loadtxt('train')
y_train_data = np.loadtxt('y_train')
val = np.loadtxt('val')
y_val = np.loadtxt('y_val')
pca = PCA(n_components=185)
train_data = pca.fit_transform(train_data)
val = pca.transform(val)
N,D = np.shape(train_data)
print 'N:',N,'D:',D
H1 = 200
H2 = 200
H3 = 200
H4 = 200
D_out = 10
num_epochs = 5
learning_rate = 0.0001
batch_size = 20
dropout = 0.65
dtype = torch.FloatTensor # Comment this out to run on GPU
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
val_full = Variable(torch.from_numpy(val).type(dtype),requires_grad=False)
train_full = Variable(torch.from_numpy(train_data).type(dtype),requires_grad=False)
y_val_var = Variable(torch.from_numpy(y_val).type(torch.LongTensor),requires_grad=False)
y_train_var = Variable(torch.from_numpy(y_train_data).type(torch.LongTensor),requires_grad=False)
def getMinibatches(data, batch_size, shuffle=True):
num_data = len(data[0])
sounds,labels = data
indices = np.arange(num_data)
if shuffle:
np.random.shuffle(indices)
for minibatch_start in np.arange(0, num_data, batch_size):
minibatch_indices = indices[minibatch_start:minibatch_start + batch_size]
yield [np.array(sounds[minibatch_indices]),np.array(labels[minibatch_indices])]
class FourLayerNet(nn.Module):
def __init__(self,D,H1,H2,H3,H4,D_out):
super(FourLayerNet, self).__init__()
self.training = True
self.linear1 = nn.Linear(D, H1)
self.bn1 = nn.BatchNorm1d(H1)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(H1,H2)
self.bn2 = nn.BatchNorm1d(H2)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout)
self.linear3 = nn.Linear(H2,H3)
self.bn3 = nn.BatchNorm1d(H3)
self.relu3 = nn.ReLU()
self.dropout3 = nn.Dropout(p=dropout)
self.linear4 = nn.Linear(H3,H4)
self.bn4 = nn.BatchNorm1d(H4)
self.relu4 = nn.ReLU()
self.dropout4 = nn.Dropout(p=dropout)
self.score = nn.Linear(H4, D_out)
nn.init.xavier_uniform(self.linear1.weight)
nn.init.xavier_uniform(self.linear2.weight)
nn.init.xavier_uniform(self.linear3.weight)
nn.init.xavier_uniform(self.linear4.weight)
def forward(self, x):
self.dropout1.training = self.training
self.dropout2.training = self.training
self.dropout3.training = self.training
self.dropout4.training = self.training
self.bn1.training = self.training
self.bn2.training = self.training
self.bn3.training = self.training
self.bn4.training = self.training
h1 = self.dropout1(self.relu1(self.bn1(self.linear1(x))))
h2 = self.dropout2(self.relu2(self.bn2(self.linear2(h1))))
h3 = self.dropout3(self.relu3(self.bn3(self.linear3(h2))))
h4 = self.dropout4(self.relu4(self.bn4(self.linear4(h3))))
return self.score(h4)
print 'Building the model'
NN = FourLayerNet(D,H1,H2,H3,H4,D_out)
loss_fn = torch.nn.CrossEntropyLoss(size_average=True)
optimizer = torch.optim.Adam(NN.parameters(), lr=learning_rate,weight_decay =0.001)
E = range(num_epochs)
T = []
V = []
print 'Starting Training'
for epoch in range(num_epochs):
NN.training = True
for i,batch in enumerate(getMinibatches([train_data,y_train_data],batch_size)):
train = Variable(torch.from_numpy(batch[0]).type(dtype), requires_grad=True)
y_train = Variable(torch.from_numpy(batch[1]).type(torch.LongTensor),requires_grad=False)
y_pred_train = NN(train)
loss = loss_fn(y_pred_train,y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# End of epoch evaluation metrics
NN.training = False
_, val_indices = torch.max(NN(val_full),dim=1)
_, train_indices = torch.max(NN(train_full),dim=1)
val_accuracy = np.mean(y_val == val_indices.data.numpy())
train_accuracy = np.mean(y_train_data == train_indices.data.numpy())
train_loss = loss_fn(NN(train_full),y_train_var).data[0]
val_loss = loss_fn(NN(val_full),y_val_var).data[0]
T.append(train_loss)
V.append(val_loss)
print 'epoch: {}, train_loss: {}, val_loss: {}, train_accuracy: {}, val_accuracy: {}'.format(epoch,train_loss,val_loss,train_accuracy,val_accuracy)
# embed()
Tplot, = plt.plot(E,T,linewidth=3)
Vplot, = plt.plot(E,V,linewidth=3)
plt.title('Feed Forward Network Learning Curves',fontsize=20)
plt.xlabel('Epoch',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.ylabel('Softmax Loss',fontsize=20)
plt.legend([Tplot,Vplot],['Training','Validation'],fontsize=20)
plt.show() | bsd-3-clause |
untom/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
    # test that return_distance, when set to True, gives the same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Check non-regression of a bug when a connectivity that does not support
    # item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
    # we should stop early, once only n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
coufon/neon-distributed | examples/fast-rcnn/demo.py | 2 | 5520 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Demo a trained Fast-RCNN model to do object detection using PASCAL VOC dataset.
This demo currently runs 1 image at a time.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/demo.py --model_file frcn_vgg.pkl
Notes:
    1. For the VGG16-based Fast R-CNN model, testing is supported with a batch size
       of 1 image. Testing consumes about 7 GB of memory.
2. During demo, all the selective search ROIs will be used to go through the network,
so the inference time varies based on how many ROIs in each image.
For PASCAL VOC 2007, the average number of SelectiveSearch ROIs is around 2000.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
"""
import os
import numpy as np
from PIL import Image
from neon.data.pascal_voc import PASCAL_VOC_CLASSES
from neon.data import PASCALVOCInference
from neon.util.argparser import NeonArgparser
from util import create_frcn_model
do_plots = True
try:
import matplotlib.pyplot as plt
plt.switch_backend('agg')
except ImportError:
print('matplotlib needs to be installed manually to generate plots needed '
'for this example. Skipping plot generation')
do_plots = False
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--img_prefix', type=str,
help='prefix for the saved image file names. If None, use '
'the model file name')
args = parser.parse_args(gen_be=True)
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"
if args.img_prefix is None:
args.img_prefix = os.path.splitext(os.path.basename(args.model_file))[0]
output_dir = os.path.join(args.data_dir, 'frcn_output')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# hyperparameters
args.batch_size = 1
n_mb = 40
img_per_batch = args.batch_size
rois_per_img = 5403
# setup dataset
image_set = 'test'
image_year = '2007'
valid_set = PASCALVOCInference(image_set, image_year, path=args.data_dir, n_mb=n_mb,
rois_per_img=rois_per_img, shuffle=False)
# setup model
model = create_frcn_model()
model.load_params(args.model_file)
model.initialize(dataset=valid_set)
CONF_THRESH = 0.8
NMS_THRESH = 0.3
# iterate through minibatches of the dataset
for mb_idx, (x, db) in enumerate(valid_set):
im = np.array(Image.open(db['img_file'])) # This is RGB order
print db['img_id']
outputs = model.fprop(x, inference=True)
scores, boxes = valid_set.post_processing(outputs, db)
# Visualize detections for each class
if do_plots:
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for cls in PASCAL_VOC_CLASSES[1:]:
        # pick out scores and bboxes related to this class
cls_ind = PASCAL_VOC_CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[cls_ind]
        # only keep the ones with high enough scores
keep = np.where(cls_scores >= CONF_THRESH)[0]
if len(keep) == 0:
continue
        # with these, do non-maximum suppression
cls_boxes = cls_boxes[keep]
cls_scores = cls_scores[keep]
keep = valid_set.nonmaximum_suppression(cls_boxes, cls_scores, NMS_THRESH)
# keep these after nms
cls_boxes = cls_boxes[keep]
cls_scores = cls_scores[keep]
# Draw detected bounding boxes
inds = np.where(cls_scores >= CONF_THRESH)[0]
if len(inds) == 0:
continue
print 'detect {}'.format(cls)
if do_plots:
for i in inds:
bbox = cls_boxes[i]
score = cls_scores[i]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(cls, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.axis('off')
plt.tight_layout()
if do_plots:
fname = os.path.join(output_dir, '{}_{}_{}_{}.png'.format(
args.img_prefix, image_set,
image_year, db['img_id']))
plt.savefig(fname)
plt.close()
| apache-2.0 |
hhuuggoo/kitchensink | examples/data_example.py | 1 | 1313 | import logging
import pandas as pd
import numpy as np
from kitchensink import client, setup_client, do, du, dp
"""single node setup
This example illustrates basic usage of remote data sources
first example works with a remote file
second example works with a remote object(stored by pickle)
"""
setup_client("http://localhost:6323/")
c = client()
df = pd.DataFrame({'a' : np.arange(2000000)})
store = pd.HDFStore('test.hdf5')
store['df'] = df
store.close()
"""dp is a convenience function, equivalent to RemoteData(local_path=<path>)
We construct a remote data object, and save the data to the server
(which generates a url). Then we create a new RemoteData pointer with du
(short for data url, equivalent to RemoteData(data_url=<data_url>))
and we use that in a function call.
"""
remote = dp("test.hdf5")
remote.save(prefix="testdata/test")
print remote.data_url
new_remote = du(remote.data_url)
def head(obj, name):
store = pd.HDFStore(obj.local_path())
return store.select(name).head(10)
c.bc(head, new_remote, 'df')
c.execute()
result = c.br()[0]
print result
"""do is short for dataobject, equivalent to RemoteData(obj=<obj>)
"""
remote = do(df)
remote.save()
def head(obj):
return obj.obj().head(10)
new_remote = du(remote.data_url)
c.bc(head, new_remote)
c.execute()
print c.br()[0]
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/externals/joblib/parallel.py | 6 | 21763 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import os
import sys
import warnings
from collections import Sized
from math import sqrt
import functools
import time
import threading
import itertools
try:
import cPickle as pickle
except:
import pickle
# Obtain possible configuration from the environment, assuming 1 (on)
# by default, upon 0 set to None. Should instructively fail if some non
# 0/1 value is set.
multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if multiprocessing:
try:
import multiprocessing
except ImportError:
multiprocessing = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if multiprocessing:
try:
_sem = multiprocessing.Semaphore()
del _sem # cleanup
except (ImportError, OSError) as e:
multiprocessing = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
###############################################################################
# CPU that works also when multiprocessing is not installed (python2.5)
def cpu_count():
""" Return the number of CPUs.
"""
if multiprocessing is None:
return 1
return multiprocessing.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
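# Illustrative note (not part of the original joblib source): the filter
# returns True when a progress message should be *skipped*, e.g.
#
#   _verbosity_filter(4, 0)    # -> True  (verbose=0: never print)
#   _verbosity_filter(0, 5)    # -> False (the first index is always printed)
#   _verbosity_filter(7, 11)   # -> False (verbose > 10: print everything)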
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function):
""" Decorator used to capture the arguments of a function.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
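# Illustrative note (not part of the original joblib source): `delayed` only
# captures the callable and its arguments, e.g. a sketch of its behaviour:
#
#   from math import sqrt
#   func, args, kwargs = delayed(sqrt)(9)
#   # func is sqrt, args == (9,), kwargs == {}
#
# which lets Parallel re-dispatch the call later in a worker process.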
###############################################################################
class ImmediateApply(object):
""" A non-delayed apply function.
"""
def __init__(self, func, args, kwargs):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = func(*args, **kwargs)
def get(self):
return self.results
###############################################################################
class CallBack(object):
""" Callback used by parallel: it is used for progress reporting, and
to add data to be processed
"""
def __init__(self, index, parallel):
self.parallel = parallel
self.index = index
def __call__(self, out):
self.parallel.print_progress(self.index)
if self.parallel._iterable:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int
The number of jobs to use for the computation. If -1 all CPUs
are used. If 1 is given, no parallel computing code is used
at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The amount of jobs to be pre-dispatched. Default is 'all',
but it may be memory consuming, for instance if each job
involves a lot of a data.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, verbose=0, pre_dispatch='all'):
self.verbose = verbose
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self._pool = None
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def dispatch(self, func, args, kwargs):
""" Queue the function for computing, with or without multiprocessing
"""
if self._pool is None:
job = ImmediateApply(func, args, kwargs)
index = len(self._jobs)
if not _verbosity_filter(index, self.verbose):
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(time.time() - self._start_time)
))
self._jobs.append(job)
self.n_dispatched += 1
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
try:
self._lock.acquire()
job = self._pool.apply_async(SafeFunction(func), args,
kwargs, callback=CallBack(self.n_dispatched, self))
self._jobs.append(job)
self.n_dispatched += 1
except AssertionError:
print('[Parallel] Pool seems closed')
finally:
self._lock.release()
def dispatch_next(self):
""" Dispatch more data for parallel processing
"""
self._dispatch_amount += 1
while self._dispatch_amount:
try:
# XXX: possible race condition shuffling the order of
# dispatches in the next two lines.
func, args, kwargs = next(self._iterable)
self.dispatch(func, args, kwargs)
self._dispatch_amount -= 1
except ValueError:
""" Race condition in accessing a generator, we skip,
the dispatch will be done later.
"""
except StopIteration:
self._iterable = None
return
def _print(self, msg, msg_args):
""" Display the message on stout or stderr depending on verbosity
"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self, index):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only about 'verbose' times
# The challenge is that we may not know the queue length
if self._iterable:
if _verbosity_filter(index, self.verbose):
return
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(elapsed_time),
))
else:
# We are finished dispatching
queue_length = self.n_dispatched
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (queue_length - index + 1
- self._pre_dispatch_amount)
frequency = (queue_length // self.verbose) + 1
is_last_item = (index + 1 == queue_length)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
queue_length,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._jobs:
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.append(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and multiprocessing is not None:
n_jobs = max(multiprocessing.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs is None or multiprocessing is None or n_jobs == 1:
n_jobs = 1
self._pool = None
else:
if multiprocessing.current_process()._daemonic:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Parallel loops cannot be nested, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing'
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect you main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ['__JOBLIB_SPAWNED_PARALLEL__'] = '1'
self._pool = multiprocessing.Pool(n_jobs)
self._lock = threading.Lock()
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
pre_dispatch = self.pre_dispatch
if isinstance(iterable, Sized):
# We are given a sized (an object with len). No need to be lazy.
pre_dispatch = 'all'
if pre_dispatch == 'all' or n_jobs == 1:
self._iterable = None
self._pre_dispatch_amount = 0
else:
self._iterable = iterable
self._dispatch_amount = 0
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
iterable = itertools.islice(iterable, pre_dispatch)
self._start_time = time.time()
self.n_dispatched = 0
try:
for function, args, kwargs in iterable:
self.dispatch(function, args, kwargs)
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.join()
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
gregcaporaso/scikit-bio | skbio/stats/tests/test_power.py | 6 | 21831 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.stats import kruskal
from skbio.stats.power import (subsample_power,
subsample_paired_power,
_check_nans,
confidence_bound,
_calculate_power,
_compare_distributions,
_calculate_power_curve,
_check_subsample_power_inputs,
_identify_sample_groups,
_draw_paired_samples,
_get_min_size,
paired_subsamples
)
class PowerAnalysisTest(TestCase):
def setUp(self):
# Defines a testing functions
def test_meta(ids, meta, cat, div):
"""Checks thhe div metric with a kruskal wallis"""
out = [meta.loc[id_, div] for id_ in ids]
return kruskal(*out)[1]
def meta_f(x):
"""Applies `test_meta` to a result"""
return test_meta(x, self.meta, 'INT', 'DIV')
def f(x):
"""returns the p value of a kruskal wallis test"""
return kruskal(*x)[1]
self.test_meta = test_meta
self.f = f
self.meta_f = meta_f
self.num_p = 1
# Sets the random seed
np.random.seed(5)
# Sets up the distributions of data for use
self.s1 = np.arange(0, 10, 1)
# Sets up two distributions which will never be equal by a rank-sum
# test.
self.samps = [np.ones((10))/10., np.ones((10))]
self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
# Sets up a vector of alpha values
self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
# Sets up a vector of samples
self.num_samps = np.arange(10, 100, 10)
# Sets up a mapping file
meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
'SEX': 'M'},
'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s',
'SEX': 'M'},
'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s',
'SEX': 'F'},
'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s',
'SEX': 'F'},
'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s',
'SEX': 'F'},
'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s',
'SEX': 'M'},
'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s',
'SEX': 'F'},
'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s',
'SEX': 'F'},
'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s',
'SEX': 'M'},
'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s',
'SEX': 'M'},
'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s',
'SEX': 'M'},
'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
'SEX': 'F'}}
self.meta = pd.DataFrame.from_dict(meta, orient='index')
self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']],
1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]}
self.pair_index = np.array([0, 0, 0, 1, 1, 1])
self.counts = np.array([5, 15, 25, 35, 45])
self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
[0.115, 0.135, 0.196, 0.204, 0.281],
[0.096, 0.170, 0.165, 0.232, 0.256],
[0.122, 0.157, 0.202, 0.250, 0.279],
[0.132, 0.135, 0.173, 0.203, 0.279]]),
np.array([[0.157, 0.345, 0.522, 0.639, 0.739],
[0.159, 0.374, 0.519, 0.646, 0.757],
[0.161, 0.339, 0.532, 0.634, 0.745],
[0.169, 0.372, 0.541, 0.646, 0.762],
[0.163, 0.371, 0.522, 0.648, 0.746]]),
np.array([[0.276, 0.626, 0.865, 0.927, 0.992],
[0.267, 0.667, 0.848, 0.937, 0.978],
[0.236, 0.642, 0.850, 0.935, 0.977],
[0.249, 0.633, 0.828, 0.955, 0.986],
[0.249, 0.663, 0.869, 0.951, 0.985]])]
self.power_alpha = 0.1
self.effects = np.array([0.15245, 0.34877, 0.55830])
self.bounds = np.array([0.01049, 0.00299, 0.007492])
        self.labels = np.array(['Age', 'Intervention', 'Antibiotics'])
self.cats = np.array(['AGE', 'INT', 'ABX'])
self.cat = "AGE"
self.control_cats = ['INT', 'ABX']
def test_subsample_power_defaults(self):
test_p, test_c = subsample_power(self.f, self.pop,
num_iter=10, num_runs=5)
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_counts(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=2,
min_counts=5)
self.assertEqual(test_p.shape, (2, 5))
npt.assert_array_equal(np.arange(5, 50, 10), test_c)
def test_subsample_power_matches(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=5,
draw_mode="matched")
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_multi_p(self):
test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]),
samples=self.pop,
num_iter=10,
num_runs=5)
self.assertEqual(test_p.shape, (5, 4, 2))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_paired_power(self):
known_c = np.array([1, 2, 3, 4])
# Sets up the handling values
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(self.meta_f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
# Test the output shapes are sane
self.assertEqual(test_p.shape, (2, 4))
npt.assert_array_equal(known_c, test_c)
def test_subsample_paired_power_multi_p(self):
def f(x):
return np.array([0.5, 0.5, 0.005])
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
self.assertEqual(test_p.shape, (2, 4, 3))
def test_check_nans_str(self):
self.assertTrue(_check_nans('string'))
def test_check_nans_num(self):
self.assertTrue(_check_nans(4.2))
def test__check_nans_nan(self):
self.assertFalse(_check_nans(np.nan))
def test__check_nans_clean_list(self):
self.assertTrue(_check_nans(['foo', 'bar'], switch=True))
def test__check_nans_list_nan(self):
self.assertFalse(_check_nans(['foo', np.nan], switch=True))
def test__check_str_error(self):
with self.assertRaises(TypeError):
_check_nans(self.f)
def test__get_min_size_strict(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
True)
self.assertEqual(test, known)
def test__get_min_size_relaxed(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
False)
self.assertEqual(known, test)
def test_confidence_bound_default(self):
        # Sets the known confidence bound
known = 2.2830070
test = confidence_bound(self.s1)
npt.assert_almost_equal(test, known, 3)
def test_confidence_bound_df(self):
known = 2.15109
test = confidence_bound(self.s1, df=15)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_alpha(self):
known = 3.2797886
test = confidence_bound(self.s1, alpha=0.01)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_nan(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
        # Sets the known value
known = np.array([2.2284, 0.2573, 0.08573])
# Tests the function
test = confidence_bound(samples, axis=0)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_axis_none(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
        # Sets the known value
known = 0.52852
# Tests the output
test = confidence_bound(samples, axis=None)
npt.assert_almost_equal(known, test, 3)
def test__calculate_power(self):
# Sets up the values to test
crit = 0.025
# Sets the known value
known = 0.5
# Calculates the test value
test = _calculate_power(self.alpha, crit)
# Checks the test value
npt.assert_almost_equal(known, test)
def test__calculate_power_n(self):
crit = 0.025
known = np.array([0.5, 0.5])
alpha = np.vstack((self.alpha, self.alpha))
test = _calculate_power(alpha, crit)
npt.assert_almost_equal(known, test)
def test__compare_distributions_sample_counts_error(self):
with self.assertRaises(ValueError):
_compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1,
counts=25)
def test__compare_distributions_all_mode(self):
known = np.ones((100))*0.0026998
test = _compare_distributions(self.f, self.samps, 1, num_iter=100)
npt.assert_allclose(known, test, 5)
def test__compare_distributions_matched_mode(self):
# Sets the known value
known_mean = 0.162195
known_std = 0.121887
known_shape = (100,)
# Tests the sample value
test = _compare_distributions(self.f, self.pop, self.num_p,
mode='matched', num_iter=100)
npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
self.assertEqual(known_shape, test.shape)
def test__compare_distributions_draw_mode(self):
draw_mode = 'Ultron'
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f, self.pop, draw_mode,
self.num_p)
def test__compare_distributions_multiple_returns(self):
known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
def f(x):
return np.array([1, 2, 3])
test = _compare_distributions(f, self.pop, 3, mode='matched',
num_iter=3)
npt.assert_array_equal(known, test)
def test_check_subsample_power_inputs_matched_mode(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((2)), np.ones((5))],
draw_mode="matched")
def test_check_subsample_power_inputs_counts(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((3)), np.ones((5))],
min_counts=5,
counts_interval=1000,
max_counts=7)
def test_check_subsample_power_inputs_ratio(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
self.samps,
ratio=np.array([1, 2, 3]))
def test_check_subsample_power_inputs_test(self):
# Defines a test function
def test(x):
return 'Hello World!'
with self.assertRaises(TypeError):
_check_subsample_power_inputs(test, self.samps)
def test_check_sample_power_inputs(self):
        # Defines the known returns
known_num_p = 1
known_ratio = np.ones((2))
known_counts = np.arange(2, 10, 2)
# Runs the code for the returns
test_ratio, test_num_p, test_counts = \
_check_subsample_power_inputs(self.f,
self.samps,
counts_interval=2,
max_counts=10)
# Checks the returns are sane
self.assertEqual(known_num_p, test_num_p)
npt.assert_array_equal(known_ratio, test_ratio)
npt.assert_array_equal(known_counts, test_counts)
def test__calculate_power_curve_ratio_error(self):
with self.assertRaises(ValueError):
_calculate_power_curve(self.f, self.pop, self.num_samps,
ratio=np.array([0.1, 0.2, 0.3]),
num_iter=100)
def test__calculate_power_curve_default(self):
# Sets the known output
known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.01)
def test__calculate_power_curve_alpha(self):
        # Sets the known output
known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
alpha=0.01,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test__calculate_power_curve_ratio(self):
        # Sets the known output
known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969,
0.996, 0.998])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
ratio=np.array([0.25, 0.75]),
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test_paired_subsamples_default(self):
# Sets the known np.array set
known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
{'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}]
# Gets the test value
cat = 'INT'
control_cats = ['SEX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats)
self.assertEqual(known_array[0], set(test_array[0]))
self.assertEqual(known_array[1], set(test_array[1]))
def test_paired_subsamples_break(self):
# Sets known np.array set
known_array = [np.array([]), np.array([])]
# Gets the test value
cat = 'ABX'
control_cats = ['SEX', 'AGE', 'INT']
test_array = paired_subsamples(self.meta, cat, control_cats)
npt.assert_array_equal(known_array, test_array)
def test_paired_subsample_undefined(self):
known_array = np.zeros((2, 0))
cat = 'INT'
order = ['Y', 'N']
control_cats = ['AGE', 'ABX', 'SEX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
npt.assert_array_equal(test_array, known_array)
def test_paired_subsample_fewer(self):
# Set known value
known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'}
# Sets up test values
cat = 'AGE'
order = ['30s', '40s']
control_cats = ['ABX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
for v in test_array[0]:
self.assertTrue(v in known_array)
for v in test_array[1]:
self.assertTrue(v in known_array)
def test_paired_subsamples_not_strict(self):
known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'},
{'LF', 'PC', 'CB', 'NR', 'CD'}]
# Gets the test values
cat = 'INT'
control_cats = ['ABX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats,
strict_match=False)
self.assertEqual(set(test_array[0]), known_array[0])
self.assertEqual(set(test_array[1]), known_array[1])
def test__identify_sample_groups(self):
        # Defines the known values
known_pairs = {0: [['MM'], ['CD']],
1: [['SR'], ['LF']],
2: [['TS'], ['PC']],
3: [['GW'], ['CB']],
4: [['PP'], ['MH']],
5: [['WM'], ['NR']]}
known_index = np.array([0, 1, 2, 3, 4, 5])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'AGE'],
order=['N', 'Y'],
strict_match=True)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
self.assertEqual(sorted(known_pairs.values()),
sorted(test_pairs.values()))
npt.assert_array_equal(known_index, test_index)
def test__identify_sample_groups_not_strict(self):
        # Defines the known values
known_pairs = {1: [np.array(['PP'], dtype=object),
np.array(['CD', 'NR'], dtype=object)],
0: [np.array(['MM', 'WM'], dtype=object),
np.array(['MH'], dtype=object)],
2: [np.array(['GW'], dtype=object),
np.array(['CB'], dtype=object)]}
known_index = np.array([0, 1, 2])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'ABX'],
order=['N', 'Y'],
strict_match=False)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
for k in known_pairs:
for i in range(2):
npt.assert_array_equal(known_pairs[k][i], test_pairs[k][i])
npt.assert_array_equal(known_index, test_index)
def test__draw_paired_samples(self):
num_samps = 3
known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'},
{'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}]
test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index,
num_samps)
for i, t in enumerate(test_samps):
self.assertTrue(set(t).issubset(known_sets[i]))
if __name__ == '__main__':
main()
| bsd-3-clause |
kaleoyster/ProjectNBI | nbi-utilities/data_gen/baselineDifferenceScore/process.py | 1 | 5095 | """Contain functions to manipulate and derive new data"""
import pandas as pd
import numpy as np
import datetime
from collections import defaultdict
from collections import deque
__author__ = 'Akshay Kale'
__copyright__ = 'GPL'
__credit__ = []
__email__ = '[email protected]'
class DataChef:
""" Contains function that operate on list, dictionary, dataframe, and series """
def __init__(self):
pass
def get_values(self, keys, dictionary):
# PERFECT
""" Return a list of values for a given key """
return [dictionary[key] for key in keys]
def create_dictionary(self, keys, values):
# PERFECT
""" Return a dictionary
Args: keys (list)
value (list)
        Returns: a dictionary (dict) """
return dict(zip(keys, values))
    def create_maps(self, df, columns, key, type_of_maps='list'):
        """
        Returns a list of dictionaries mapping key : values of each column,
        for example:
            structure number (key): latest value of the deck (value)
        Args:
            df (pandas dataframe): survey records
            columns (list): columns to map against the key
            key (string): column whose values become the dictionary keys
            type_of_maps (string): 'list' collects all values per key,
                anything else keeps only the last value seen
        Returns:
            list_of_dictionaries (list): a list of dictionaries, one per column
        """
        def create_dictionary(df_keys, df_values):
            # Build a key -> value(s) mapping from two aligned series
            if type_of_maps == 'list':
                dictionary = defaultdict(list)
                for key_temp, value_temp in zip(df_keys, df_values):
                    dictionary[key_temp].append(value_temp)
            else:
                dictionary = defaultdict()
                for key_temp, value_temp in zip(df_keys, df_values):
                    dictionary[key_temp] = value_temp
            return dictionary
        list_of_dictionaries = []
        for col in columns:
            temp_dict = create_dictionary(df[key], df[col])
            list_of_dictionaries.append(temp_dict)
        return list_of_dictionaries
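    # Illustrative sketch (not part of the original module), using the corrected
    # create_maps above with hypothetical column names:
    #
    #   chef = DataChef()
    #   df = pd.DataFrame({'structureNumber': ['B1', 'B1', 'B2'],
    #                      'deck': [7, 6, 8]})
    #   deck_map, = chef.create_maps(df, ['deck'], 'structureNumber')
    #   # deck_map -> {'B1': [7, 6], 'B2': [8]}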
def create_groupby_df(self, key, df, list_of_maps):
# PERFECT
"""
Returns Groupby dataframe with values of the columns arranged according to their time series.
Args:
key (string): Key is the groupby criteria
            df (dataframe): the existing dataframe of loose individual records
            list_of_maps (list): list of dictionaries, one per column in the df.
                Each column is mapped to the key, i.e. 'key' is the key
                and the column value is the 'value'
Returns:
df_new (dataframe): a dataframe of columns grouped by key 'structure number'
"""
#Select columns
columns = list(df.columns)
# initialize empty dataframe
df_new = pd.DataFrame(columns = columns)
# Setting column of structure number
df_new[key] = df[key].unique()
# mapping other column values to the structure number
for number, col in enumerate(columns[1:]):
df_new[col] = df_new[key].map(list_of_maps[number])
return df_new
def is_same_elements(self, elements):
""" Returns True if all the element are same """
return all(elem == elements[0] for elem in elements)
def calculate_age(self, list_of_year_built, list_of_survey, kind='survey'):
""" Returns age of the of the bridge """
year_built = np.array(list_of_year_built)
year_survey = np.array(list_of_survey)
now = datetime.datetime.now()
if kind == 'survey':
return year_survey - year_built
elif kind == 'current':
return now.year - year_built
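    # Illustrative sketch (not part of the original module): survey age is the
    # element-wise difference between survey year and year built, e.g.
    #
    #   DataChef().calculate_age([2000, 2010], [2020, 2020], kind='survey')
    #   # -> array([20, 10])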
    def categorize_bridges_by_adt(self, ADT):
        # PERFECT
        """ Returns a list of bridge classes by ADT, as defined by Author in so and so """
class_of_bridges_adt = []
for adt in ADT:
if adt < 100:
class_of_bridges_adt.append('Very Light')
elif 100 <= adt < 1000:
class_of_bridges_adt.append('Light')
elif 1000 <= adt < 5000:
class_of_bridges_adt.append('Moderate')
elif 5000 <= adt:
class_of_bridges_adt.append('Heavy')
else:
class_of_bridges_adt.append('IDK')
return class_of_bridges_adt
    def categorize_bridges_by_adtt(self, ADTT):
        # PERFECT
        """ Returns a list of bridge classes by ADTT, as defined by Author in so and so """
class_of_bridges_adtt = []
for adtt in ADTT:
if adtt < 100:
class_of_bridges_adtt.append('Light')
elif 100 <= adtt < 500:
class_of_bridges_adtt.append('Moderate')
elif 500 <= adtt:
class_of_bridges_adtt.append('Heavy')
else:
class_of_bridges_adtt.append('IDK')
return class_of_bridges_adtt
| gpl-2.0 |
googleinterns/cabby | cabby/geo/util.py | 1 | 19295 | # coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Library to support map geographical computations.'''
from collections import namedtuple
import folium
import geographiclib
from geopy.distance import geodesic
import numpy as np
from numpy import int64
import operator
import osmnx as ox
import pandas as pd
from s2geometry import pywraps2 as s2
from shapely.geometry.point import Point
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry import box, mapping, LineString, LinearRing
from shapely.geometry.base import BaseGeometry
import sys
from typing import List, Optional, Tuple, Sequence, Any, Text
import webbrowser
FAR_DISTANCE_THRESHOLD = 2000 # Minimum distance between far cells in meters.
MAX_FAILED_ATTEMPTS = 50
CoordsYX = namedtuple('CoordsYX', ('y x'))
CoordsXY = namedtuple('CoordsXY', ('x y'))
def get_distance_between_points(start_point: Point, end_point: Point) -> float:
  '''Calculate the great-circle distance between the two points in meters.
Arguments:
start_point: The point to calculate the distance from.
end_point: The point to calculate the distance to.
Returns:
Distance length in meters.
'''
return ox.distance.great_circle_vec(
start_point.y, start_point.x, end_point.y, end_point.x)
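# Illustrative note (not part of the original module): the result is a
# great-circle distance in meters, so one degree of longitude on the equator,
# e.g. get_distance_between_points(Point(0, 0), Point(1, 0)), is roughly
# 111.2 km (approximate value).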
def far_cellid(
point: Point, cells: pd.DataFrame, far_distance = FAR_DISTANCE_THRESHOLD
) -> Optional[float]:
'''Get a cell id far from the given cell point.
Arguments:
point: The center point of the cell.
Returns:
A cellid of a far cell.
'''
far_cell_found = None
failed_counter = 0
while far_cell_found is None:
failed_counter += 1
if failed_counter > MAX_FAILED_ATTEMPTS:
sys.exit(
f"Reached max number of failed attempts in far cell calculation for point: {(Point.y, Point.x)}.")
sample_cell = cells.sample(1).iloc[0]
distance = get_distance_between_points(point, sample_cell.point)
if distance > far_distance:
far_cell_found = sample_cell.cellid
return far_cell_found
def neighbor_cellid(cellid: int) -> int:
'''Get a neighbor cell id.
Arguments:
cellid: The cellid of the cell to return a neighbor cellid for.
Returns:
A cellid of a neighbor cell.
'''
cell = s2.S2CellId(cellid)
return cell.next().id()
def cellids_from_s2cellids(list_s2cells: Sequence[s2.S2CellId]) -> Sequence[int]:
'''Converts a sequence of S2CellIds to a sequence of ids of the S2CellIds.
Arguments:
list_s2cells(S2CellIds): The list of S2CellIds to be converted to ids.
Returns:
A sequence of ids corresponding to the S2CellIds.
'''
return [cell.id() for cell in list_s2cells]
def s2cellids_from_cellids(list_ids: Sequence[int]) -> Sequence[s2.S2CellId]:
'''Converts a sequence of ids of S2CellIds to a sequence of S2CellIds.
Arguments:
list_ids(list): The list of S2CellIds ids to be converted to S2CellIds.
Returns:
A sequence of S2CellIds corresponding to the ids.
'''
return [s2.S2Cell(s2.S2CellId(cellid)) for cellid in list_ids]
def get_s2cover_for_s2polygon(s2polygon: s2.S2Polygon,
level: int) -> Optional[List]:
'''Returns the cellids that cover the shape (point/polygon/polyline).
Arguments:
s2polygon(S2Polygon): The S2Polygon to which S2Cells covering will be
performed.
Returns:
A sequence of S2Cells that completely cover the provided S2Polygon.
'''
if s2polygon is None:
return None
coverer = s2.S2RegionCoverer()
coverer.set_min_level(level)
coverer.set_max_level(level)
coverer.set_max_cells(100)
covering = coverer.GetCovering(s2polygon)
for cell in covering:
assert cell.level() == level
return covering
def s2polygon_from_shapely_point(shapely_point: Point) -> s2.S2Polygon:
'''Converts a Shapely Point to an S2Polygon.
Arguments:
point(Shapely Point): The Shapely Point to be converted to
S2Polygon.
Returns:
    The S2Polygon equivalent to the input Shapely Point.
'''
y, x = shapely_point.y, shapely_point.x
latlng = s2.S2LatLng.FromDegrees(y, x)
return s2.S2Polygon(s2.S2Cell(s2.S2CellId(latlng)))
def s2cellid_from_point(point: Point) -> int:
'''Converts point to the S2CellId.
Arguments:
point: The point to be converted to S2CellId.
Returns:
    The S2CellId equivalent to the input point.
'''
y, x = point.y, point.x
latlng = s2.S2LatLng.FromDegrees(y, x)
return s2.S2CellId(latlng).id()
def s2point_from_coord_xy(coord: CoordsXY) -> s2.S2Point:
  '''Converts coordinates (longitude and latitude) to an S2Point.
  Arguments:
    coord: The coordinates given as longitude and
    latitude to be converted to an S2Point.
  Returns:
    The S2Point equivalent to the input coordinates.
'''
# Convert coordinates (lon,lat) to s2LatLng.
latlng = s2.S2LatLng.FromDegrees(coord[1], coord[0])
return latlng.ToPoint() # S2Point
def s2polygon_from_shapely_polygon(shapely_polygon: Polygon) -> s2.S2Polygon:
'''Convert a Shapely Polygon to S2Polygon.
Arguments:
shapely_polygon(Polygon): The Shapely Polygon to be
converted to S2Polygon.
Returns:
    The S2Polygon equivalent to the input Shapely Polygon.
'''
# Filter where shape has no exterior attributes (e.g. lines).
if not hasattr(shapely_polygon.buffer(0.00005), 'exterior'):
return
else:
# Add a small buffer for cases where cover doesn't work.
list_coords = list(shapely_polygon.buffer(0.00005).exterior.coords)
# Get list of points.
s2point_list = list(map(s2point_from_coord_xy, list_coords))
s2point_list = s2point_list[::-1] # Counterclockwise.
return s2.S2Polygon(s2.S2Loop(s2point_list))
def s2polygon_from_shapely_polyline(shapely_polyline: Polygon) -> s2.S2Polyline:
  '''Convert a Shapely polyline to an S2Polyline.
  Arguments:
    shapely_polyline(Polygon): The Shapely Polygon which
      is a line to be converted to an S2Polyline.
  Returns:
    The S2Polyline equivalent to the input Shapely Polygon.
  '''
  list_coords = list(shapely_polyline.exterior.coords)
  list_ll = []
  for lat, lng in list_coords:
    list_ll.append(s2.S2LatLng.FromDegrees(lat, lng))
  line = s2.S2Polyline()
  line.InitFromS2LatLngs(list_ll)
  return line
def cut(line: LineString, distance: float) -> Sequence[LineString]:
'''Cut line in two at a distance from its
starting point.
Arguments:
    line: The line to be cut.
    distance: The distance from the line's starting point at which to cut.
Returns:
A list of lines.
'''
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
return [
LineString(coords[:i+1]),
LineString(coords[i:])]
if pd > distance:
cp = line.interpolate(distance)
return [
LineString(coords[:i] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[i:])]
if coords[0]==coords[-1]: # It is a loop.
cp = line.interpolate(distance)
return [
LineString(coords[:-1] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[-1:])]
return [LineString(line)]
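# Illustrative usage sketch (added for clarity; not part of the original
# module). Cutting a 10-unit line 4 units from its start:
#
#   >>> first, second = cut(LineString([(0, 0), (10, 0)]), 4.0)
#   >>> list(first.coords)
#   [(0.0, 0.0), (4.0, 0.0)]
#   >>> list(second.coords)
#   [(4.0, 0.0), (10.0, 0.0)]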
def plot_cells(cells: s2.S2Cell, location: Sequence[Point], zoom_level: int):
'''Plot the S2Cell covering.'''
# Create a map.
map_osm = folium.Map(
location=location, zoom_start=zoom_level, tiles='Stamen Toner')
for cellid in cells:
cellid = cellid[0]
cell = s2.S2Cell(cellid)
vertices = []
for i in range(0, 4):
vertex = cell.GetVertex(i)
latlng = s2.S2LatLng(vertex)
vertices.append((latlng.lat().degrees(),
latlng.lng().degrees()))
gj = folium.GeoJson(
{
"type": "Polygon",
"coordinates": [vertices]
},
      style_function=lambda feature: {'weight': 1, 'fillColor': '#eea500'})
gj.add_children(folium.Popup(cellid.ToToken()))
gj.add_to(map_osm)
filepath = 'visualization.html'
map_osm.save(filepath)
webbrowser.open(filepath, new=2)
def s2cellids_from_point(point: Point, level: int) -> Sequence[s2.S2CellId]:
'''Get s2cell covering from shapely point (OpenStreetMaps Nodes).
Arguments:
    point(Point): a Shapely Point for which the S2Cell
      covering will be performed.
Returns:
A sequence of S2Cells that cover the provided Shapely Point.
'''
s2polygon = s2polygon_from_shapely_point(point)
cellid = get_s2cover_for_s2polygon(s2polygon, level)[0]
return [cellid]
def cellid_from_point(point: Point, level: int) -> int:
'''Get s2cell covering from shapely point (OpenStreetMaps Nodes).
Arguments:
    point(Point): a Shapely Point for which the S2Cell
      covering will be performed.
  Returns:
    The id of the S2CellId that covers the provided Shapely Point.
'''
s2polygon = s2polygon_from_shapely_point(point)
cellids = get_s2cover_for_s2polygon(s2polygon, level)
if cellids is None:
sys.exit("S2cellid covering failed because the point is a None.")
cellid = cellids[0]
return cellid.id()
def s2cellids_from_polygon(polygon: Polygon, level: int) -> Optional[Sequence]:
'''Get s2cell covering from shapely polygon (OpenStreetMaps Ways).
Arguments:
    polygon(Polygon): a Shapely Polygon for which the S2Cell
      covering will be performed.
Returns:
A sequence of S2Cells that cover the provided Shapely Polygon.
'''
s2polygon = s2polygon_from_shapely_polygon(polygon)
return get_s2cover_for_s2polygon(s2polygon, level)
def cellids_from_polygon(polygon: Polygon, level: int) -> Optional[List]:
'''Get s2cell covering from shapely polygon (OpenStreetMaps Ways).
Arguments:
    polygon(Polygon): a Shapely Polygon for which the S2Cell
      covering will be performed.
  Returns:
    A sequence of S2Cell ids that cover the provided Shapely Polygon.
'''
s2polygon = s2polygon_from_shapely_polygon(polygon)
s2cells = get_s2cover_for_s2polygon(s2polygon, level)
return [cell.id() for cell in s2cells]
def cellid_from_polyline(polyline: Polygon, level: int) -> Optional[Sequence]:
'''Get s2cell covering from shapely polygon that are lines (OpenStreetMaps
Ways of streets).
Arguments:
    polyline(Polygon): The Shapely Polygon of a line for which the S2Cell
      covering will be performed.
  Returns:
    A sequence of S2Cells that cover the provided Shapely Polygon.
'''
s2polygon = s2polygon_from_shapely_polyline(polyline)
return get_s2cover_for_s2polygon(s2polygon, level)
def get_bearing(start: Point, goal: Point) -> float:
"""Get the bearing (heading) from the start lat-lon to the goal lat-lon.
Arguments:
start: The starting point.
goal: The goal point.
Returns:
The geospatial bearing when heading from the start to the goal. The
bearing angle given by azi1 (azimuth) is clockwise relative to north, so
a bearing of 90 degrees is due east, 180 is south, and 270 is west.
"""
solution = geographiclib.geodesic.Geodesic.WGS84.Inverse(
start.y, start.x, goal.y, goal.x)
return solution['azi1'] % 360
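# Illustrative usage sketch (added for clarity; not part of the original
# module). Heading due east along the equator gives a bearing of about 90:
#
#   start = Point(0.0, 0.0)   # (lon, lat)
#   goal = Point(1.0, 0.0)    # one degree east, same latitude
#   get_bearing(start, goal)  # roughly 90.0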
def get_distance_km(start: Point, goal: Point) -> float:
"""Returns the geodesic distance (in kilometers) between start and goal.
This distance is direct (as the bird flies), rather than based on a route
going over roads and around buildings.
"""
return geodesic(start.coords, goal.coords).km
def concat_numbers(n_1: int, n_2: int) -> int:
'''Return the concatenation of two numbers.
Arguments:
n_1: The first number to be concatenated.
n_2: The second number to be concatenated.
Returns:
A concatenated int of the two numbers
'''
return int(str(n_1) + str(n_2))
def get_distance_m(start: Point, goal: Point) -> float:
"""Returns the geodesic distance (in meters) between start and goal.
This distance is direct (as the bird flies), rather than based on a route
going over roads and around buildings.
"""
return geodesic(start.coords, goal.coords).m
def tuple_from_point(point: Point) -> CoordsYX:
'''Convert a Point into a tuple, with latitude as first element, and
longitude as second.
Arguments:
point(Point): A lat-lng point.
Returns:
    A lat-lng CoordsYX tuple.
'''
return CoordsYX(point.y, point.x)
def list_xy_from_point(point: Point) -> Sequence[float]:
'''Convert a Point into a sequence, with longitude as first element, and
latitude as second.
Arguments:
point(Point): A lat-lng point.
Returns:
A lng-lat Sequence[float, float].
'''
return [point.x, point.y]
def list_yx_from_point(point: Point) -> Sequence[float]:
'''Convert a Point into a sequence, with latitude as first element, and
longitude as second.
Arguments:
point(Point): A lat-lng point.
Returns:
A lat-lng Sequence[float, float].
'''
return [point.y, point.x]
def midpoint(p1: Point, p2: Point) -> Point:
'''Get the midpoint between two points.
Arguments:
p1(Point): A lat-lng point.
p2(Point): A lat-lng point.
Returns:
A lat-lng Point.
'''
return Point((p1.x+p2.x)/2, (p1.y+p2.y)/2)
def check_if_geometry_in_polygon(geometry: BaseGeometry, poly: Polygon) -> bool:
  '''Check if the geometry intersects with the polygon.
  Arguments:
    geometry: The geometry to check intersection against a polygon.
    poly: The polygon to check intersection against a geometry.
  Returns:
    True if the geometry is contained in (for a Point) or intersects the polygon.
  '''
  if isinstance(geometry, Point):
    return poly.contains(geometry)
  else:
    return geometry['geometry'].intersects(poly)
def get_distance_between_geometries(geometry: BaseGeometry, point: Point) -> float:
'''Calculate the distance between point and polygon in meters.
Arguments:
    geometry: The geometry to measure the distance to.
    point: The point to measure the distance from.
Returns:
The distance between point and polygon in meters.
'''
if isinstance(geometry, Point):
return get_distance_between_points(geometry, point)
else:
return get_distance_between_point_to_geometry(geometry, point)
def get_distance_between_point_to_geometry(
geometry: BaseGeometry, point: Point) -> float:
'''Calculate the distance between point and polygon in meters.
Arguments:
    geometry: The geometry to measure the distance to.
    point: The point to measure the distance from.
Returns:
The distance between point and polygon in meters.
'''
dist_min = float("Inf")
if isinstance(geometry, MultiPolygon):
coords = [coord for poly in geometry for coord in poly.exterior.coords]
elif isinstance(geometry, Polygon):
coords = geometry.exterior.coords
else:
coords = geometry.coords
for coord in coords:
point_current = Point(coord)
dist = get_distance_between_points(point, point_current)
if dist_min > dist:
dist_min = dist
return dist_min
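# Illustrative note (added for clarity; not part of the original module): the
# distance above is measured to the nearest *vertex* of the geometry, not to
# the nearest point on its edges. For example:
#
#   poly = Polygon([(0.0, 0.0), (0.0, 0.01), (0.01, 0.01), (0.01, 0.0)])
#   pt = Point(0.02, 0.005)
#   d = get_distance_between_point_to_geometry(poly, pt)  # meters to the closest corner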
def get_line_length(line: LineString) -> float:
'''Calculate the length of a line in meters.
Arguments:
    line: The line whose length will be calculated.
  Returns:
    The length of the line in meters.
'''
dist = 0
point_1 = Point(line.coords[0])
for coord in line.coords[1:]:
point_2 = Point(coord)
dist += get_distance_between_points(point_1, point_2)
point_1 = point_2
return dist
def point_from_list_coord(coord: Sequence) -> Point:
  '''Converts coordinates in list format (latitude and longitude) to a Point.
  E.g., the list [40.715865, -74.037258].
Arguments:
coord: A lat-lng coordinate to be converted to a point.
Returns:
A point.
'''
lat = coord[0]
lon = coord[1]
return Point(lon, lat)
def point_from_str_coord(coord_str: Text) -> Point:
  '''Converts coordinates in string format (latitude and longitude) to a Point.
  E.g., the string '(40.715865, -74.037258)'.
Arguments:
coord: A lat-lng coordinate to be converted to a point.
Returns:
A point.
'''
list_coords_str = coord_str.replace("(", "").replace(")", "").split(',')
coord = list(map(float, list_coords_str))
return Point(coord[1], coord[0])
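# Illustrative usage sketch (added for clarity; not part of the original
# module). The input string is '(lat, lng)', the returned Point is (lon, lat):
#
#   >>> p = point_from_str_coord('(40.715865, -74.037258)')
#   >>> (p.x, p.y)
#   (-74.037258, 40.715865)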
def coords_from_str_coord(coord_str: Text) -> CoordsYX:
  '''Converts coordinates in string format (latitude and longitude) to
  coordinates in a tuple format.
  E.g., the string '(40.715865, -74.037258)'.
Arguments:
coord: A lat-lng coordinate to be converted to a tuple.
Returns:
A tuple of lat-lng.
'''
list_coords_str = coord_str.replace("(", "").replace(")", "").split(',')
coord = list(map(float, list_coords_str))
return CoordsYX(coord[0], coord[1])
def get_centers_from_s2cellids(
s2cell_ids: Sequence[int64]) -> Sequence[Point]:
"""Returns the center latitude and longitude of s2 cells.
Arguments:
    s2cell_ids: A 1D array of valid S2 cell ids.
  Returns:
    A list of Shapely Points, one per cell id.
"""
prediction_coords = []
for s2cellid in s2cell_ids:
s2_latlng = s2.S2CellId(int(s2cellid)).ToLatLng()
lat = s2_latlng.lat().degrees()
lng = s2_latlng.lng().degrees()
prediction_coords.append(Point(lng, lat))
return prediction_coords
def get_center_from_s2cellids(
s2cell_ids: Sequence[int64]) -> Sequence[CoordsYX]:
"""Returns the center latitude and longitude of s2 cells.
Arguments:
    s2cell_ids: A 1D array of valid S2 cell ids.
  Returns:
    A numpy array of [lat, lng] pairs, one per cell id.
"""
prediction_coords = []
for s2cellid in s2cell_ids:
s2_latlng = s2.S2CellId(int(s2cellid)).ToLatLng()
lat = s2_latlng.lat().degrees()
lng = s2_latlng.lng().degrees()
prediction_coords.append([lat, lng])
return np.array(prediction_coords)
def get_linestring_distance(line: LineString) -> int:
'''Calculate the line length in meters.
Arguments:
    line: The line that the length calculation will be performed on.
Returns:
Line length in meters.
'''
dist = 0
point_1 = Point(line.coords[0])
for coord in line.coords[1:]:
point_2 = Point(coord)
dist += get_distance_between_points(point_1, point_2)
point_1 = point_2
return dist
def get_distance_between_points(point_1: Point, point_2: Point) -> int:
  '''Calculate the great-circle distance between two points in meters.
  Arguments:
    point_1: The point to calculate the distance from.
    point_2: The point to calculate the distance to.
  Returns:
    The distance in meters.
'''
dist = ox.distance.great_circle_vec(
point_1.y, point_1.x, point_2.y, point_2.x)
return dist
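# Illustrative usage sketch (added for clarity; not part of the original
# module). One degree of latitude is roughly 111 km:
#
#   p1 = Point(-74.0, 40.0)
#   p2 = Point(-74.0, 41.0)
#   get_distance_between_points(p1, p2)   # roughly 111000 (meters)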
def point_str_to_shapely_point(point_str: Text) -> Point:
'''Converts point string to shapely point.
Arguments:
point_str: The point string to be converted to shapely point. E.g, of
string 'Point(-74.037258 40.715865)'.
Returns:
A Point.
'''
point_str=point_str.split('(')[-1]
point_str=point_str.split(')')[0]
coords = point_str.split(" ")
x, y = float(coords[0]), float(coords[1])
return Point(x,y)
| apache-2.0 |
pabryan/smc | src/smc_sagews/smc_sagews/sage_salvus.py | 1 | 120930 | ##################################################################################
# #
# Extra code that the Salvus server makes available in the running Sage session. #
# #
##################################################################################
#########################################################################################
# Copyright (C) 2013 William Stein <[email protected]> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
import copy, os, sys, types
# This reduces a lot of confusion for Sage worksheets -- people expect
# to be able to import from the current working directory.
sys.path.append('.')
salvus = None
import json
from uuid import uuid4
def uuid():
return str(uuid4())
##########################################################################
# New function interact implementation
##########################################################################
import inspect
interacts = {}
def jsonable(x):
"""
    Given any object x, make a JSON-able version of x, doing the best we can.
    For some objects, such as Sage integers, this works well. For other
objects which make no sense in Javascript, we get a string.
"""
import sage.all
try:
json.dumps(x)
return x
except:
if isinstance(x, (sage.all.Integer)):
return int(x)
else:
return str(x)
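# A minimal sketch of what jsonable does (added for clarity; not part of the
# original module):
#
#   jsonable(7)            # already JSON-serializable -> 7
#   jsonable(Integer(7))   # Sage integer -> plain int 7
#   jsonable(x**2 + 1)     # symbolic expression -> its string representation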
class InteractCell(object):
def __init__(self, f, layout=None, width=None, style=None,
update_args=None, auto_update=True,
flicker=False, output=True):
"""
Given a function f, create an object that describes an interact
for working with f interactively.
INPUT:
- `f` -- Python function
- ``width`` -- (default: None) overall width of the interact canvas
- ``style`` -- (default: None) extra CSS style to apply to canvas
- ``update_args`` -- (default: None) only call f if one of the args in
this list of strings changes.
- ``auto_update`` -- (default: True) call f every time an input changes
          (or one of the args in update_args).
        - ``flicker`` -- (default: False) if False, the output part of the cell
          never shrinks; it can only grow, which alleviates flicker.
- ``output`` -- (default: True) if False, do not automatically
provide any area to display output.
"""
self._flicker = flicker
self._output = output
self._uuid = uuid()
# Prevent garbage collection until client specifically requests it,
# since we want to be able to store state.
interacts[self._uuid] = self
self._f = f
self._width = jsonable(width)
self._style = str(style)
(args, varargs, varkw, defaults) = inspect.getargspec(f)
if defaults is None:
defaults = []
n = len(args) - len(defaults)
self._controls = dict([(arg, interact_control(arg, defaults[i-n] if i >= n else None))
for i, arg in enumerate(args)])
self._last_vals = {}
for arg in args:
self._last_vals[arg] = self._controls[arg].default()
self._ordered_args = args
self._args = set(args)
if isinstance(layout, dict):
# Implement the layout = {'top':, 'bottom':, 'left':,
# 'right':} dictionary option that is in the Sage
# notebook. I personally think it is really awkward and
            # unusable, but there may be many interacts out there that
# use it.
# Example layout={'top': [['a', 'b'], ['x', 'y']], 'left': [['c']], 'bottom': [['d']]}
top = layout.get('top', [])
bottom = layout.get('bottom', [])
left = layout.get('left', [])
right = layout.get('right', [])
new_layout = []
for row in top:
new_layout.append(row)
if len(left) > 0 and len(right) > 0:
new_layout.append(left[0] + [''] + right[0])
del left[0]
del right[0]
elif len(left) > 0 and len(right) == 0:
new_layout.append(left[0] + [''])
del left[0]
elif len(left) == 0 and len(right) > 0:
new_layout.append([''] + right[0])
del right[0]
i = 0
while len(left) > 0 and len(right) > 0:
new_layout.append(left[0] + ['_salvus_'] + right[0])
del left[0]
del right[0]
while len(left) > 0:
new_layout.append(left[0])
del left[0]
while len(right) > 0:
new_layout.append(right[0])
del right[0]
for row in bottom:
new_layout.append(row)
layout = new_layout
if layout is None:
layout = [[(str(arg), 12, None)] for arg in self._ordered_args]
else:
try:
v = []
for row in layout:
new_row = []
for x in row:
if isinstance(x, str):
x = (x,)
if len(x) == 1:
new_row.append((str(x[0]), 12//len(row), None))
elif len(x) == 2:
new_row.append((str(x[0]), int(x[1]), None))
elif len(x) == 3:
new_row.append((str(x[0]), int(x[1]), str(x[2])))
v.append(new_row)
layout = v
except:
raise ValueError, "layout must be None or a list of tuples (variable_name, width, [optional label]), where width is an integer between 1 and 12, variable_name is a string, and label is a string. The widths in each row must add up to at most 12. The empty string '' denotes the output area."
# Append a row for any remaining controls:
layout_vars = set(sum([[x[0] for x in row] for row in layout],[]))
for v in args:
if v not in layout_vars:
layout.append([(v, 12, None)])
if self._output:
if '' not in layout_vars:
layout.append([('', 12, None)])
self._layout = layout
# TODO -- this is UGLY
if not auto_update:
c = button('Update')
c._opts['var'] = 'auto_update'
self._controls['auto_update'] = c
self._ordered_args.append("auto_update")
layout.append([('auto_update',2)])
update_args = ['auto_update']
self._update_args = update_args
def jsonable(self):
"""
Return a JSON-able description of this interact, which the client
can use for laying out controls.
"""
X = {'controls':[self._controls[arg].jsonable() for arg in self._ordered_args], 'id':self._uuid}
if self._width is not None:
X['width'] = self._width
if self._layout is not None:
X['layout'] = self._layout
X['style'] = self._style
X['flicker'] = self._flicker
return X
def __call__(self, vals):
"""
Call self._f with inputs specified by vals. Any input variables not
specified in vals will have the value they had last time.
"""
self.changed = [str(x) for x in vals.keys()]
for k, v in vals.iteritems():
x = self._controls[k](v)
self._last_vals[k] = x
if self._update_args is not None:
do_it = False
for v in self._update_args:
if v in self.changed:
do_it = True
if not do_it:
return
interact_exec_stack.append(self)
try:
self._f(**dict([(k,self._last_vals[k]) for k in self._args]))
finally:
interact_exec_stack.pop()
class InteractFunction(object):
def __init__(self, interact_cell):
self.__dict__['interact_cell'] = interact_cell
def __call__(self, **kwds):
salvus.clear()
for arg, value in kwds.iteritems():
self.__setattr__(arg, value)
return self.interact_cell(kwds)
def __setattr__(self, arg, value):
I = self.__dict__['interact_cell']
if arg in I._controls and not isinstance(value, control):
# setting value of existing control
v = I._controls[arg].convert_to_client(value)
desc = {'var':arg, 'default':v}
I._last_vals[arg] = value
else:
# create a new control
new_control = interact_control(arg, value)
I._controls[arg] = new_control
desc = new_control.jsonable()
# set the id of the containing interact
desc['id'] = I._uuid
salvus.javascript("worksheet.set_interact_var(obj)", obj=jsonable(desc))
def __getattr__(self, arg):
I = self.__dict__['interact_cell']
try:
return I._last_vals[arg]
except Exception, err:
print err
raise AttributeError("no interact control corresponding to input variable '%s'"%arg)
def __delattr__(self, arg):
I = self.__dict__['interact_cell']
try:
del I._controls[arg]
except KeyError:
pass
desc = {'id':I._uuid, 'name':arg}
salvus.javascript("worksheet.del_interact_var(obj)", obj=jsonable(desc))
def changed(self):
"""
Return the variables that changed since last evaluation of the interact function
body. [SALVUS only]
For example::
@interact
def f(n=True, m=False, xyz=[1,2,3]):
print n, m, xyz, interact.changed()
"""
return self.__dict__['interact_cell'].changed
class _interact_layout:
def __init__(self, *args):
self._args = args
def __call__(self, f):
return interact(f, *self._args)
class Interact(object):
"""
Use interact to create interactive worksheet cells with sliders,
text boxes, radio buttons, check boxes, color selectors, and more.
Put ``@interact`` on the line before a function definition in a
cell by itself, and choose appropriate defaults for the variable
names to determine the types of controls (see tables below). You
may also put ``@interact(layout=...)`` to control the layout of
controls. Within the function, you may explicitly set the value
of the control corresponding to a variable foo to bar by typing
interact.foo = bar.
Type "interact.controls.[tab]" to get access to all of the controls.
INPUT:
- ``f`` -- function
- ``width`` -- number, or string such as '80%', '300px', '20em'.
- ``style`` -- CSS style string, which allows you to change the border,
background color, etc., of the interact.
- ``update_args`` -- (default: None); list of strings, so that
only changing the corresponding controls causes the function to
be re-evaluated; changing other controls will not cause an update.
- ``auto_update`` -- (default: True); if False, a button labeled
      'Update' will appear which you can click on to re-evaluate.
- ``layout`` -- (default: one control per row) a list [row0,
row1, ...] of lists of tuples row0 = [(var_name, width,
label), ...], where the var_name's are strings, the widths
must add up to at most 12, and the label is optional. This
will layout all of the controls and output using Twitter
Bootstraps "Fluid layout", with spans corresponding
to the widths. Use var_name='' to specify where the output
      goes, if you don't want it to be last. You may specify entries for
controls that you will create later using interact.var_name = foo.
NOTES: The flicker and layout options above are only in SALVUS.
For backwards compatibility with the Sage notebook, if layout
is a dictionary (with keys 'top', 'bottom', 'left', 'right'),
then the appropriate layout will be rendered as it used to be
in the Sage notebook.
OUTPUT:
- creates an interactive control.
AUTOMATIC CONTROL RULES
-----------------------
There are also some defaults that allow you to make controls
automatically without having to explicitly specify them. E.g.,
you can make ``x`` a continuous slider of values between ``u`` and
``v`` by just writing ``x=(u,v)`` in the argument list.
- ``u`` - blank input_box
- ``u=elt`` - input_box with ``default=element``, unless other rule below
- ``u=(umin,umax)`` - continuous slider (really `100` steps)
- ``u=(umin,umax,du)`` - slider with step size ``du``
- ``u=list`` - buttons if ``len(list)`` at most `5`; otherwise, drop down
- ``u=generator`` - a slider (up to `10000` steps)
- ``u=bool`` - a checkbox
- ``u=Color('blue')`` - a color selector; returns ``Color`` object
- ``u=matrix`` - an ``input_grid`` with ``to_value`` set to
``matrix.parent()`` and default values given by the matrix
- ``u=(default, v)`` - ``v`` anything as above, with given ``default`` value
- ``u=(label, v)`` - ``v`` anything as above, with given ``label`` (a string)
EXAMPLES:
The layout option::
@interact(layout={'top': [['a', 'b']], 'left': [['c']],
'bottom': [['d']], 'right':[['e']]})
def _(a=x^2, b=(0..20), c=100, d=x+1, e=sin(2)):
print a+b+c+d+e
We illustrate some features that are only in Salvus, not in the
Sage cell server or Sage notebook.
You can set the value of a control called foo to 100 using
interact.foo=100. For example::
@interact
def f(n=20, twice=None):
interact.twice = int(n)*2
In this example, we create and delete multiple controls depending
on properties of the input::
@interact
def f(n=20, **kwds):
print kwds
n = Integer(n)
if n % 2 == 1:
del interact.half
else:
interact.half = input_box(n/2, readonly=True)
if n.is_prime():
interact.is_prime = input_box('True', readonly=True)
else:
del interact.is_prime
You can access the value of a control associated to a variable foo
that you create using interact.foo, and check whether there is a
control associated to a given variable name using hasattr::
@interact
def f():
if not hasattr(interact, 'foo'):
interact.foo = 'hello'
else:
print interact.foo
An indecisive interact::
@interact
def f(n=selector(['yes', 'no'])):
for i in range(5):
interact.n = i%2
sleep(.2)
We use the style option to make a holiday interact::
@interact(width=25,
style="background-color:lightgreen; border:5px dashed red;")
def f(x=button('Merry ...',width=20)):
pass
We make a little box that can be dragged around, resized, and is
updated via a computation (in this case, counting primes)::
@interact(width=30,
style="background-color:lightorange; position:absolute; z-index:1000; box-shadow : 8px 8px 4px #888;")
def f(prime=text_control(label="Counting primes: ")):
salvus.javascript("cell.element.closest('.salvus-cell-output-interact').draggable().resizable()")
p = 2
c = 1
while True:
interact.prime = '%s, %.2f'%(p, float(c)/p)
p = next_prime(p)
c += 1
sleep(.25)
"""
def __call__(self, f=None, layout=None, width=None, style=None, update_args=None, auto_update=True, flicker=False, output=True):
if f is None:
return _interact_layout(layout, width, style, update_args, auto_update, flicker)
else:
return salvus.interact(f, layout=layout, width=width, style=style,
update_args=update_args, auto_update=auto_update, flicker=flicker, output=output)
def __setattr__(self, arg, value):
I = interact_exec_stack[-1]
if arg in I._controls and not isinstance(value, control):
# setting value of existing control
v = I._controls[arg].convert_to_client(value)
desc = {'var':arg, 'default':v}
I._last_vals[arg] = value
else:
# create a new control
new_control = interact_control(arg, value)
I._controls[arg] = new_control
desc = new_control.jsonable()
desc['id'] = I._uuid
salvus.javascript("worksheet.set_interact_var(obj)", obj=desc)
    def __delattr__(self, arg):
        I = interact_exec_stack[-1]
        try:
            del I._controls[arg]
        except KeyError:
            pass
        desc = {'id':I._uuid, 'name':arg}
        salvus.javascript("worksheet.del_interact_var(obj)", obj=jsonable(desc))
def __getattr__(self, arg):
try:
return interact_exec_stack[-1]._last_vals[arg]
except Exception, err:
raise AttributeError("no interact control corresponding to input variable '%s'"%arg)
def changed(self):
"""
Return the variables that changed since last evaluation of the interact function
body. [SALVUS only]
For example::
@interact
def f(n=True, m=False, xyz=[1,2,3]):
print n, m, xyz, interact.changed()
"""
return interact_exec_stack[-1].changed
interact = Interact()
interact_exec_stack = []
class control:
def __init__(self, control_type, opts, repr, convert_from_client=None, convert_to_client=jsonable):
# The type of the control -- a string, used for CSS selectors, switches, etc.
self._control_type = control_type
# The options that define the control -- passed to client
self._opts = dict(opts)
# Used to print the control to a string.
self._repr = repr
# Callable that the control may use in converting from JSON
self._convert_from_client = convert_from_client
self._convert_to_client = convert_to_client
self._last_value = self._opts['default']
def convert_to_client(self, value):
try:
return self._convert_to_client(value)
except Exception, err:
sys.stderr.write("%s -- %s\n"%(err, self))
sys.stderr.flush()
return jsonable(value)
def __call__(self, obj):
"""
Convert JSON-able object returned from client to describe
value of this control.
"""
if self._convert_from_client is not None:
try:
x = self._convert_from_client(obj)
except Exception, err:
sys.stderr.write("%s -- %s\n"%(err, self))
sys.stderr.flush()
x = self._last_value
else:
x = obj
self._last_value = x
return x
def __repr__(self):
return self._repr
def label(self):
"""Return the label of this control."""
return self._opts['label']
def default(self):
"""Return default value of this control."""
return self(self._opts['default'])
def type(self):
"""Return type that values of this control are coerced to."""
return self._opts['type']
def jsonable(self):
"""Return JSON-able object the client browser uses to render the control."""
X = {'control_type':self._control_type}
for k, v in self._opts.iteritems():
X[k] = jsonable(v)
return X
import types
def list_of_first_n(v, n):
"""Given an iterator v, return first n elements it produces as a list."""
if not hasattr(v, 'next'):
v = v.__iter__()
w = []
while n > 0:
try:
w.append(v.next())
except StopIteration:
return w
n -= 1
return w
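# A minimal usage sketch (added for clarity; not part of the original module):
#
#   list_of_first_n(iter([3, 1, 4, 1, 5, 9]), 4)      # -> [3, 1, 4, 1]
#   list_of_first_n((n*n for n in xrange(10**6)), 3)  # -> [0, 1, 4]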
def automatic_control(default):
from sage.all import Color
from sage.structure.element import is_Matrix
label = None
default_value = None
for _ in range(2):
if isinstance(default, tuple) and len(default) == 2 and isinstance(default[0], str):
label, default = default
if isinstance(default, tuple) and len(default) == 2 and isinstance(default[1], (tuple, list, types.GeneratorType)):
default_value, default = default
if isinstance(default, control):
if label:
default._opts['label'] = label
return default
elif isinstance(default, str):
return input_box(default, label=label, type=str)
elif isinstance(default, bool):
return checkbox(default, label=label)
elif isinstance(default, list):
return selector(default, default=default_value, label=label, buttons=len(default) <= 5)
elif isinstance(default, types.GeneratorType):
return slider(list_of_first_n(default, 10000), default=default_value, label=label)
elif isinstance(default, Color):
return color_selector(default=default, label=label)
elif isinstance(default, tuple):
if len(default) == 2:
return slider(default[0], default[1], default=default_value, label=label)
elif len(default) == 3:
return slider(default[0], default[1], default[2], default=default_value, label=label)
else:
return slider(list(default), default=default_value, label=label)
elif is_Matrix(default):
return input_grid(default.nrows(), default.ncols(), default=default.list(), to_value=default.parent(), label=label)
else:
return input_box(default, label=label)
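# A minimal sketch of the automatic control rules above (added for clarity;
# not part of the original module):
#
#   automatic_control((0, 10))           # -> continuous slider from 0 to 10
#   automatic_control([1, 2, 3])         # -> selector shown as buttons (<= 5 values)
#   automatic_control(True)              # -> checkbox
#   automatic_control(('mass', (0, 1)))  # -> slider labeled 'mass'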
def interact_control(arg, value):
if isinstance(value, control):
if value._opts['label'] is None:
value._opts['label'] = arg
c = value
else:
c = automatic_control(value)
if c._opts['label'] is None:
c._opts['label'] = arg
c._opts['var'] = arg
return c
def sage_eval(x, locals=None):
x = str(x).strip()
if x.isspace():
return None
from sage.all import sage_eval
return sage_eval(x, locals=locals)
class ParseValue:
def __init__(self, type):
self._type = type
def _eval(self, value):
return sage_eval(value, locals=None if salvus is None else salvus.namespace)
def __call__(self, value):
from sage.all import Color
if self._type is None:
return self._eval(value)
elif self._type is str:
return str(value)
elif self._type is Color:
try:
return Color(value)
except ValueError:
try:
return Color("#"+value)
except ValueError:
raise TypeError("invalid color '%s'"%value)
else:
return self._type(self._eval(value))
def input_box(default=None, label=None, type=None, nrows=1, width=None, readonly=False, submit_button=None):
"""
An input box interactive control for use with the :func:`interact` command.
INPUT:
- default -- default value
- label -- label test
- type -- the type that the input is coerced to (from string)
- nrows -- (default: 1) the number of rows of the box
- width -- width; how wide the box is
- readonly -- is it read-only?
- submit_button -- defaults to true if nrows > 1 and false otherwise.
"""
return control(
control_type = 'input-box',
opts = locals(),
repr = "Input box",
convert_from_client = ParseValue(type)
)
def checkbox(default=True, label=None, readonly=False):
"""
A checkbox interactive control for use with the :func:`interact` command.
"""
return control(
control_type = 'checkbox',
opts = locals(),
repr = "Checkbox"
)
def color_selector(default='blue', label=None, readonly=False, widget=None, hide_box=False):
"""
A color selector.
SALVUS only: the widget option is ignored -- SALVUS only provides
bootstrap-colorpicker.
EXAMPLES::
@interact
def f(c=color_selector()):
print c
"""
from sage.all import Color
default = Color(default).html_color()
return control(
control_type = 'color-selector',
opts = locals(),
repr = "Color selector",
convert_from_client = lambda x : Color(str(x)),
convert_to_client = lambda x : Color(x).html_color()
)
def text_control(default='', label=None, classes=None):
"""
A read-only control that displays arbitrary HTML amongst the other
interact controls. This is very powerful, since it can display
any HTML.
INPUT::
- ``default`` -- actual HTML to display
- ``label`` -- string or None
- ``classes`` -- space separated string of CSS classes
EXAMPLES::
We output the factorization of a number in a text_control::
@interact
def f(n=2013, fact=text_control("")):
interact.fact = factor(n)
We use a CSS class to make the text_control look like a button:
@interact
def f(n=text_control("foo <b>bar</b>", classes='btn')):
pass
We animate a picture into view:
@interact
def f(size=[10,15,..,30], speed=[1,2,3,4]):
for k in range(size):
interact.g = text_control("<img src='http://sagemath.org/pix/sage_logo_new.png' width=%s>"%(20*k))
sleep(speed/50.0)
"""
return control(
control_type = 'text',
opts = locals(),
repr = "Text %r"%(default)
)
def button(default=None, label=None, classes=None, width=None, icon=None):
"""
Create a button. [SALVUS only]
You can tell that pressing this button triggered the interact
evaluation because interact.changed() will include the variable
name tied to the button.
INPUT:
- ``default`` -- value variable is set to
- ``label`` -- string (default: None)
- ``classes`` -- string if None; if given, space separated
list of CSS classes. e.g., Bootstrap CSS classes such as:
btn-primary, btn-info, btn-success, btn-warning, btn-danger,
btn-link, btn-large, btn-small, btn-mini.
See http://twitter.github.com/bootstrap/base-css.html#buttons
If button_classes a single string, that class is applied to all buttons.
- ``width`` - an integer or string (default: None); if given,
all buttons are this width. If an integer, the default units
are 'ex'. A string that specifies any valid HTML units (e.g., '100px', '3em')
is also allowed [SALVUS only].
- ``icon`` -- None or string name of any icon listed at the font
awesome website (http://fortawesome.github.com/Font-Awesome/), e.g., 'fa-repeat'
EXAMPLES::
@interact
def f(hi=button('Hello', label='', classes="btn-primary btn-large"),
by=button("By")):
if 'hi' in interact.changed():
print "Hello to you, good sir."
if 'by' in interact.changed():
print "See you."
Some buttons with icons::
@interact
def f(n=button('repeat', icon='fa-repeat'),
m=button('see?', icon="fa-eye", classes="btn-large")):
print interact.changed()
"""
return control(
control_type = "button",
opts = locals(),
repr = "Button",
convert_from_client = lambda x : default,
convert_to_client = lambda x : str(x)
)
class Slider:
def __init__(self, start, stop, step_size, max_steps):
if isinstance(start, (list, tuple)):
self.vals = start
else:
if step_size is None:
if stop is None:
step_size = start/float(max_steps)
else:
step_size = (stop-start)/float(max_steps)
from sage.all import srange # sage range is much better/more flexible.
self.vals = srange(start, stop, step_size, include_endpoint=True)
        # Now check to see if any of the above constructed a list of
# values that exceeds max_steps -- if so, linearly interpolate:
if len(self.vals) > max_steps:
n = len(self.vals)//max_steps
self.vals = [self.vals[n*i] for i in range(len(self.vals)//n)]
def to_client(self, val):
if val is None:
return 0
if isinstance(val, (list, tuple)):
return [self.to_client(v) for v in val]
else:
# Find index into self.vals of closest match.
try:
return self.vals.index(val) # exact match
except ValueError:
pass
z = [(abs(val-x),i) for i, x in enumerate(self.vals)]
z.sort()
return z[0][1]
def from_client(self, val):
if val is None:
return self.vals[0]
# val can be a n-tuple or an integer
if isinstance(val, (list, tuple)):
return tuple([self.vals[v] for v in val])
else:
return self.vals[int(val)]
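# A minimal sketch of how Slider maps between server-side values and the
# client's integer indices (added for clarity; not part of the original
# module):
#
#   s = Slider([10, 20, 30], None, None, 500)  # explicit list of values
#   s.to_client(20)    # -> 1  (index of the closest value)
#   s.from_client(2)   # -> 30 (value at the given index)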
class InputGrid:
def __init__(self, nrows, ncols, default, to_value):
self.nrows = nrows
self.ncols = ncols
self.to_value = to_value
self.value = copy.deepcopy(self.adapt(default))
def adapt(self, x):
if not isinstance(x, list):
return [[x for _ in range(self.ncols)] for _ in range(self.nrows)]
elif not all(isinstance(elt, list) for elt in x):
return [[x[i * self.ncols + j] for j in xrange(self.ncols)] for i in xrange(self.nrows)]
else:
return x
def from_client(self, x):
if len(x) == 0:
self.value = []
elif isinstance(x[0], list):
self.value = [[sage_eval(t) for t in z] for z in x]
else:
# x is a list of (unicode) strings -- we sage eval them all at once (instead of individually).
s = '[' + ','.join([str(t) for t in x]) + ']'
v = sage_eval(s)
self.value = [v[n:n+self.ncols] for n in range(0, self.nrows*self.ncols, self.ncols)]
return self.to_value(self.value) if self.to_value is not None else self.value
def to_client(self, x=None):
if x is None:
v = self.value
else:
v = self.adapt(x)
self.value = v # save value in our local cache
return [[repr(x) for x in y] for y in v]
def input_grid(nrows, ncols, default=0, label=None, to_value=None, width=5):
r"""
A grid of input boxes, for use with the :func:`interact` command.
INPUT:
- ``nrows`` - an integer
- ``ncols`` - an integer
- ``default`` - an object; the default put in this input box
- ``label`` - a string; the label rendered to the left of the box.
- ``to_value`` - a list; the grid output (list of rows) is
sent through this function. This may reformat the data or
coerce the type.
- ``width`` - an integer; size of each input box in characters
EXAMPLES:
Solving a system::
@interact
def _(m = input_grid(2,2, default = [[1,7],[3,4]],
label=r'$M\qquad =$', to_value=matrix, width=8),
v = input_grid(2,1, default=[1,2],
label=r'$v\qquad =$', to_value=matrix)):
try:
x = m.solve_right(v)
html('$$%s %s = %s$$'%(latex(m), latex(x), latex(v)))
except:
html('There is no solution to $$%s x=%s$$'%(latex(m), latex(v)))
Squaring an editable and randomizable matrix::
@interact
def f(reset = button('Randomize', classes="btn-primary", icon="fa-th"),
square = button("Square", icon="fa-external-link"),
m = input_grid(4,4,default=0, width=5, label="m =", to_value=matrix)):
if 'reset' in interact.changed():
print "randomize"
interact.m = [[random() for _ in range(4)] for _ in range(4)]
if 'square' in interact.changed():
salvus.tex(m^2)
"""
ig = InputGrid(nrows, ncols, default, to_value)
return control(
control_type = 'input-grid',
opts = {'default' : ig.to_client(),
'label' : label,
'width' : width,
'nrows' : nrows,
'ncols' : ncols},
repr = "Input Grid",
convert_from_client = ig.from_client,
convert_to_client = ig.to_client
)
def slider(start, stop=None, step=None, default=None, label=None,
display_value=True, max_steps=500, step_size=None, range=False,
width=None, animate=True):
"""
An interactive slider control for use with :func:`interact`.
There are several ways to call the slider function, but they all
take several named arguments:
- ``default`` - an object (default: None); default value is closest
value. If range=True, default can also be a 2-tuple (low, high).
- ``label`` -- string
- ``display_value`` -- bool (default: True); whether to display the
current value to the right of the slider.
- ``max_steps`` -- integer, default: 500; this is the maximum
number of values that the slider can take on. Do not make
it too large, since it could overwhelm the client. [SALVUS only]
- ``range`` -- bool (default: False); instead, you can select
a range of values (lower, higher), which are returned as a
2-tuple. You may also set the value of the slider or
specify a default value using a 2-tuple.
- ``width`` -- how wide the slider appears to the user [SALVUS only]
- ``animate`` -- True (default), False,"fast", "slow", or the
duration of the animation in milliseconds. [SALVUS only]
You may call the slider function as follows:
- slider([list of objects], ...) -- slider taking values the objects in the list
- slider([start,] stop[, step]) -- slider over numbers from start
to stop. When step is given it specifies the increment (or
decrement); if it is not given, then the number of steps equals
the width of the control in pixels. In all cases, the number of
values will be shrunk to be at most the pixel_width, since it is
not possible to select more than this many values using a slider.
EXAMPLES::
Use one slider to modify the animation speed of another::
@interact
def f(speed=(50,100,..,2000), x=slider([1..50], animate=1000)):
            if 'speed' in interact.changed():
print "change x to have speed", speed
del interact.x
interact.x = slider([1..50], default=interact.x, animate=speed)
return
"""
if step_size is not None: # for compat with sage
step = step_size
slider = Slider(start, stop, step, max_steps)
vals = [str(x) for x in slider.vals] # for display by the client
if range and default is None:
default = [0, len(vals)-1]
return control(
control_type = 'range-slider' if range else 'slider',
opts = {'default' : slider.to_client(default),
'label' : label,
'animate' : animate,
'vals' : vals,
'display_value' : display_value,
'width' : width},
repr = "Slider",
convert_from_client = slider.from_client,
convert_to_client = slider.to_client
)
def range_slider(*args, **kwds):
"""
range_slider is the same as :func:`slider`, except with range=True.
EXAMPLES:
A range slider with a constraint::
@interact
def _(t = range_slider([1..1000], default=(100,200), label=r'Choose a range for $\alpha$')):
print t
"""
kwds['range'] = True
return slider(*args, **kwds)
def selector(values, label=None, default=None,
nrows=None, ncols=None, width=None, buttons=False,
button_classes=None):
"""
A drop down menu or a button bar for use in conjunction with
the :func:`interact` command. We use the same command to
create either a drop down menu or selector bar of buttons,
since conceptually the two controls do exactly the same thing
- they only look different. If either ``nrows`` or ``ncols``
is given, then you get a buttons instead of a drop down menu.
INPUT:
- ``values`` - either (1) a list [val0, val1, val2, ...] or (2)
a list of pairs [(val0, lbl0), (val1,lbl1), ...] in which case
all labels must be given -- use None to auto-compute a given label.
- ``label`` - a string (default: None); if given, this label
is placed to the left of the entire button group
- ``default`` - an object (default: first); default value in values list
- ``nrows`` - an integer (default: None); if given determines
the number of rows of buttons; if given, buttons=True
- ``ncols`` - an integer (default: None); if given determines
the number of columns of buttons; if given, buttons=True
- ``width`` - an integer or string (default: None); if given,
all buttons are this width. If an integer, the default units
are 'ex'. A string that specifies any valid HTML units (e.g., '100px', '3em')
is also allowed [SALVUS only].
- ``buttons`` - a bool (default: False, except as noted
above); if True, use buttons
- ``button_classes`` - [SALVUS only] None, a string, or list of strings
of the of same length as values, whose entries are a whitespace-separated
string of CSS classes, e.g., Bootstrap CSS classes such as:
btn-primary, btn-info, btn-success, btn-warning, btn-danger,
btn-link, btn-large, btn-small, btn-mini.
See http://twitter.github.com/bootstrap/base-css.html#buttons
If button_classes a single string, that class is applied to all buttons.
"""
if (len(values) > 0 and isinstance(values[0], tuple) and len(values[0]) == 2):
vals = [z[0] for z in values]
lbls = [str(z[1]) if z[1] is not None else None for z in values]
else:
vals = values
lbls = [None] * len(vals)
for i in range(len(vals)):
if lbls[i] is None:
v = vals[i]
lbls[i] = v if isinstance(v, str) else str(v)
if default is None:
default = 0
else:
try:
default = vals.index(default)
        except ValueError:
default = 0
opts = dict(locals())
for k in ['vals', 'values', 'i', 'v', 'z']:
if k in opts:
del opts[k] # these could have a big jsonable repr
opts['lbls'] = lbls
return control(
control_type = 'selector',
opts = opts,
repr = "Selector labeled %r with values %s"%(label, values),
convert_from_client = lambda n : vals[int(n)],
convert_to_client = lambda x : vals.index(x)
)
interact_functions = {}
interact_controls = ['button', 'checkbox', 'color_selector', 'input_box',
'range_slider', 'selector', 'slider', 'text_control',
'input_grid']
for f in ['interact'] + interact_controls:
interact_functions[f] = globals()[f]
# A little magic so that "interact.controls.[tab]" shows all the controls.
class Controls:
pass
Interact.controls = Controls()
for f in interact_controls:
interact.controls.__dict__[f] = interact_functions[f]
##########################################################################################
# Cell object -- programmatically control the current cell.
##########################################################################################
class Cell(object):
def id(self):
"""
Return the UUID of the cell in which this function is called.
"""
return salvus._id
def hide(self, component='input'):
"""
Hide the 'input' or 'output' component of a cell.
"""
salvus.hide(component)
def show(self, component='input'):
"""
Show the 'input' or 'output' component of a cell.
"""
salvus.show(component)
def hideall(self):
"""
Hide the input and output fields of the cell in which this code executes.
"""
salvus.hide('input')
salvus.hide('output')
#def input(self, val=None):
# """
# Get or set the value of the input component of the cell in
# which this code executes.
# """
# salvus.javascript("cell.set_input(obj)", obj=val)
#
#def output(self, val=None):
# """
# Get or set the value of the output component of the cell in
# which this code executes.
# """
# salvus.javascript("cell.set_output(obj)", obj=val)
# return salvus.output(val, self._id)
cell = Cell()
##########################################################################################
# Cell decorators -- aka "percent modes"
##########################################################################################
import sage.misc.html
try:
_html = sage.misc.html.HTML()
except:
_html = sage.misc.html.HTMLFragmentFactory
class HTML:
"""
Cell mode that renders everything after %html as HTML then hides
the input (unless you pass in hide=False).
EXAMPLES::
---
%html
<h1>A Title</h1>
<h2>Subtitle</h2>
---
%html(hide=False)
<h1>A Title</h1>
<h2>Subtitle</h2>
---
%html("<h1>A title</h1>", hide=False)
---
%html(hide=False) <h1>Title</h1>
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return HTML(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
if hide:
salvus.hide('input')
salvus.html(s)
def table(self):
raise NotImplementedError, "html.table not implemented in SageMathCloud yet"
html = HTML()
html.iframe = _html.iframe # written in a way that works fine
def coffeescript(s=None, once=False):
"""
Execute code using CoffeeScript.
For example:
%coffeescript console.log 'hi'
or
coffeescript("console.log 'hi'")
You may either pass in a string or use this as a cell decorator,
i.e., put %coffeescript at the top of a cell.
If you set once=False, the code will be executed every time the output of the cell is rendered, e.g.,
on load, like with %auto::
coffeescript('console.log("hi")', once=False)
or
%coffeescript(once=False)
console.log("hi")
EXTRA FUNCTIONALITY:
When executing code, a function called print is defined, and objects cell and worksheet.::
print(1,2,'foo','bar') -- displays the inputs in the output cell
cell -- has attributes cell.output (the html output box) and cell.cell_id
worksheet -- has attributes project_page and editor, and methods interrupt, kill, and
execute_code: (opts) =>
opts = defaults opts,
code : required
data : undefined
preparse : true
cb : undefined
OPTIMIZATION: When used alone as a cell decorator in a Sage worksheet
with once=False (the default), rendering is done entirely client side,
which is much faster, not requiring a round-trip to the server.
"""
if s is None:
return lambda s : salvus.javascript(s, once=once, coffeescript=True)
else:
return salvus.javascript(s, coffeescript=True, once=once)
def javascript(s=None, once=False):
"""
Execute code using JavaScript.
For example:
%javascript console.log('hi')
or
javascript("console.log('hi')")
You may either pass in a string or use this as a cell decorator,
i.e., put %javascript at the top of a cell.
If once=False (the default), the code will be executed every time the output of the
cell is rendered, e.g., on load, like with %auto::
javascript('.. some code ', once=False)
or
%javascript(once=False)
... some code
WARNING: If once=True, then this code is likely to get executed *before* the rest
of the output for this cell has been rendered by the client.
javascript('console.log("HI")', once=False)
EXTRA FUNCTIONALITY:
When executing code, a function called print is defined, and objects cell and worksheet.::
print(1,2,'foo','bar') -- displays the inputs in the output cell
cell -- has attributes cell.output (the html output box) and cell.cell_id
worksheet -- has attributes project_page and editor, and methods interrupt, kill, and
execute_code: (opts) =>
opts = defaults opts,
code : required
data : undefined
preparse : true
cb : undefined
This example illustrates using worksheet.execute_code::
%coffeescript
for i in [500..505]
worksheet.execute_code
code : "i=salvus.data['i']; i, factor(i)"
data : {i:i}
cb : (mesg) ->
if mesg.stdout then print(mesg.stdout)
if mesg.stderr then print(mesg.stderr)
OPTIMIZATION: When used alone as a cell decorator in a Sage worksheet
with once=False (the default), rendering is done entirely client side,
which is much faster, not requiring a round-trip to the server.
"""
if s is None:
return lambda s : salvus.javascript(s, once=once)
else:
return salvus.javascript(s, once=once)
javascript_exec_doc = r"""
To send code from Javascript back to the Python process to
be executed use the worksheet.execute_code function::
%javascript worksheet.execute_code(string_to_execute)
You may also use a more general call format of the form::
%javascript
worksheet.execute_code({code:string_to_execute, data:jsonable_object,
preparse:true or false, cb:function});
The data object is available when the string_to_execute is being
evaluated as salvus.data. For example, if you execute this code
in a cell::
javascript('''
worksheet.execute_code({code:"a = salvus.data['b']/2; print a", data:{b:5},
preparse:false, cb:function(mesg) { console.log(mesg)} });
''')
then the Python variable a is set to 2, and the Javascript console log will display::
Object {done: false, event: "output", id: "..."}
Object {stdout: "2\n", done: true, event: "output", id: "..."}
You can also send an interrupt signal to the Python process from
Javascript by calling worksheet.interrupt(), and kill the process
with worksheet.kill(). For example, here the a=4 never
happens (but a=2 does)::
%javascript
worksheet.execute_code({code:'a=2; sleep(100); a=4;',
cb:function(mesg) { worksheet.interrupt(); console.log(mesg)}})
or using CoffeeScript (a Javascript preparser)::
%coffeescript
worksheet.execute_code
code : 'a=2; sleep(100); a=4;'
cb : (mesg) ->
worksheet.interrupt()
console.log(mesg)
The Javascript code is evaluated with numerous standard Javascript libraries available,
including jQuery, Twitter Bootstrap, jQueryUI, etc.
"""
for s in [coffeescript, javascript]:
s.__doc__ += javascript_exec_doc
def latex0(s=None, **kwds):
"""
Create and display an arbitrary LaTeX document as a png image in the Salvus Notebook.
In addition to directly calling latex.eval, you may put %latex (or %latex.eval(density=75, ...etc...))
at the top of a cell, which will typeset everything else in the cell.
"""
if s is None:
return lambda t : latex0(t, **kwds)
import os
if 'filename' not in kwds:
import tempfile
delete_file = True
kwds['filename'] = tempfile.mkstemp(suffix=".png")[1]
else:
delete_file = False
if 'locals' not in kwds:
kwds['locals'] = salvus.namespace
if 'globals' not in kwds:
kwds['globals'] = salvus.namespace
sage.misc.latex.Latex.eval(sage.misc.latex.latex, s, **kwds)
salvus.file(kwds['filename'], once=False)
if delete_file:
os.unlink(kwds['filename'])
return ''
latex0.__doc__ += sage.misc.latex.Latex.eval.__doc__
class Time:
"""
Time execution of code exactly once in Salvus by:
- putting %time at the top of a cell to time execution of the entire cell
- put %time at the beginning of line to time execution of just that line
    - write time('some code') to time execution of the contents of the string.
If you want to time repeated execution of code for benchmarking purposes, use
the timeit command instead.
"""
def __init__(self, start=False):
if start:
from sage.all import walltime, cputime
self._start_walltime = walltime()
self._start_cputime = cputime()
def before(self, code):
return Time(start=True)
def after(self, code):
from sage.all import walltime, cputime
print "CPU time: %.2f s, Wall time: %.2f s"%( cputime(self._start_cputime), walltime(self._start_walltime))
self._start_cputime = self._start_walltime = None
def __call__(self, code):
from sage.all import walltime, cputime
not_as_decorator = self._start_cputime is None
if not_as_decorator:
self.before(code)
salvus.execute(code)
if not_as_decorator:
self.after(code)
time = Time()
def file(path):
"""
Block decorator to write to a file. Use as follows:
%file('filename') put this line in the file
or
%file('filename')
everything in the rest of the
cell goes into the file with given name.
As with all block decorators in Salvus, the arguments to file can
be arbitrary expressions. For examples,
a = 'file'; b = ['name', 'txt']
%file(a+b[0]+'.'+b[1]) rest of line goes in 'filename.txt'
"""
return lambda content: open(path,'w').write(content)
def timeit(*args, **kwds):
"""
Time execution of a command or block of commands.
This command has been enhanced for Salvus so you may use it as
a block decorator as well, e.g.,
%timeit 2+3
and
%timeit(number=10, preparse=False) 2^3
%timeit(number=10, seconds=True) 2^3
and
%timeit(preparse=False)
[rest of the cell]
Here is the original docstring for timeit:
"""
def go(code):
print sage.misc.sage_timeit.sage_timeit(code, globals_dict=salvus.namespace, **kwds)
if len(args) == 0:
return lambda code : go(code)
else:
go(*args)
# TODO: these need to also give the argspec
timeit.__doc__ += sage.misc.sage_timeit.sage_timeit.__doc__
class Capture:
"""
Capture or ignore the output from evaluating the given code. (SALVUS only).
Use capture as a block decorator by placing either %capture or
%capture(optional args) at the beginning of a cell or at the
beginning of a line. If you use just plain %capture then stdout
and stderr are completely ignored. If you use %capture(args)
you can redirect or echo stdout and stderr to variables or
files. For example if you start a cell with this line::
%capture(stdout='output', stderr=open('error','w'), append=True, echo=True)
then stdout is appended (because append=True) to the global
variable output, stderr is written to the file 'error', and the
output is still displayed in the output portion of the cell (echo=True).
INPUT:
- stdout -- string (or object with write method) to send stdout output to (string=name of variable)
- stderr -- string (or object with write method) to send stderr output to (string=name of variable)
- append -- (default: False) if stdout/stderr are a string, append to corresponding variable
- echo -- (default: False) if True, also echo stdout/stderr to the output cell.
"""
def __init__(self, stdout, stderr, append, echo):
self.v = (stdout, stderr, append, echo)
def before(self, code):
(stdout, stderr, append, echo) = self.v
self._orig_stdout_f = orig_stdout_f = sys.stdout._f
if stdout is not None:
if hasattr(stdout, 'write'):
def write_stdout(buf):
stdout.write(buf)
elif isinstance(stdout, str):
if (stdout not in salvus.namespace) or not append:
salvus.namespace[stdout] = ''
if not isinstance(salvus.namespace[stdout], str):
salvus.namespace[stdout] = str(salvus.namespace[stdout])
def write_stdout(buf):
salvus.namespace[stdout] += buf
else:
raise TypeError, "stdout must be None, a string, or have a write method"
def f(buf, done):
write_stdout(buf)
if echo:
orig_stdout_f(buf, done)
elif done:
orig_stdout_f('', done)
sys.stdout._f = f
elif not echo:
def f(buf,done):
if done:
orig_stdout_f('',done)
sys.stdout._f = f
self._orig_stderr_f = orig_stderr_f = sys.stderr._f
if stderr is not None:
if hasattr(stderr, 'write'):
def write_stderr(buf):
stderr.write(buf)
elif isinstance(stderr, str):
if (stderr not in salvus.namespace) or not append:
salvus.namespace[stderr] = ''
if not isinstance(salvus.namespace[stderr], str):
salvus.namespace[stderr] = str(salvus.namespace[stderr])
def write_stderr(buf):
salvus.namespace[stderr] += buf
else:
raise TypeError, "stderr must be None, a string, or have a write method"
def f(buf, done):
write_stderr(buf)
if echo:
orig_stderr_f(buf, done)
elif done:
orig_stderr_f('', done)
sys.stderr._f = f
elif not echo:
def f(buf,done):
if done:
orig_stderr_f('',done)
sys.stderr._f = f
return self
def __call__(self, code=None, stdout=None, stderr=None, append=False, echo=False):
if code is None:
return Capture(stdout=stdout, stderr=stderr, append=append, echo=echo)
salvus.execute(code)
def after(self, code):
sys.stdout._f = self._orig_stdout_f
sys.stderr._f = self._orig_stderr_f
capture = Capture(stdout=None, stderr=None, append=False, echo=False)
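# A minimal usage sketch for %capture (the variable names 'out' and 'err' are made up):
#   %capture(stdout='out', stderr='err', echo=True)
#   print "this text is echoed and also stored in the string variables out and err"
# In a later cell, `print out` shows the captured stdout.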
def cython(code=None, **kwds):
"""
Block decorator to easily include Cython code in the Salvus notebook.
Just put %cython at the top of a cell, and the rest is compiled as Cython code.
You can pass options to cython by typing "%cython(... var=value...)" instead.
This is a wrapper around Sage's cython function, whose docstring is:
"""
if code is None:
return lambda code: cython(code, **kwds)
import sage.misc.misc
path = sage.misc.misc.tmp_dir()
filename = os.path.join(path, 'a.pyx')
open(filename, 'w').write(code)
if 'annotate' not in kwds:
kwds['annotate'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **kwds)
try:
sys.path.insert(0,path)
module = __import__(modname)
finally:
del sys.path[0]
import inspect
for name, value in inspect.getmembers(module):
if not name.startswith('_'):
salvus.namespace[name] = value
files = os.listdir(path)
html_filename = None
for n in files:
base, ext = os.path.splitext(n)
if ext.startswith('.html') and '_pyx_' in base:
html_filename = os.path.join(path, n)
if html_filename is not None:
html_url = salvus.file(html_filename, raw=True, show=False)
salvus.html("<a href='%s' target='_new' class='btn btn-small' style='margin-top: 1ex'>Auto-generated code... <i class='fa fa-external-link'></i></a>"%html_url)
cython.__doc__ += sage.misc.cython.cython.__doc__
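# A minimal usage sketch for %cython (the function below is an arbitrary example):
#   %cython
#   cpdef long csum(long n):
#       cdef long i, s = 0
#       for i in range(n):
#           s += i
#       return s
# Afterwards csum is available in the worksheet namespace, e.g. csum(10**6).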
class script:
r"""
Block decorator to run an arbitrary shell command with input from a
cell in Salvus.
Put %script('shell command line') or %script(['command', 'arg1',
'arg2', ...]) by itself on a line in a cell, and the command line
is run with the rest of the contents of the cell as its stdin. You can
also use script in single line mode, e.g.,::
%script('gp -q') factor(2^97 - 1)
or
%script(['gp', '-q']) factor(2^97 - 1)
will launch a gp session, feed 'factor(2^97-1)' into stdin, and
display the resulting factorization.
NOTE: the result is stored in the attribute "stdout", so you can do::
s = script('gp -q')
%s factor(2^97-1)
s.stdout
'\n[11447 1]\n\n[13842607235828485645766393 1]\n\n'
and s.stdout will now be the output string.
You may also specify the shell environment with the env keyword.
"""
def __init__(self, args, env=None):
self._args = args
self._env = env
def __call__(self, code=''):
import subprocess
try:
s = None
s = subprocess.Popen(self._args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=isinstance(self._args, str),
env=self._env)
s.stdin.write(code); s.stdin.close()
finally:
if s is None:
return
try:
self.stdout = s.stdout.read()
sys.stdout.write(self.stdout)
finally:
try:
os.system("pkill -TERM -P %s"%s.pid)
except OSError:
pass
try:
os.kill(s.pid, 9)
except OSError:
pass
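# A minimal usage sketch for the script block decorator (the shell command is only an
# example and assumes wc is installed):
#   s = script('wc -w')
#   %s count these four words
#   s.stdout      # expected to contain the word count printed by wc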
def python(code):
"""
Block decorator to run code in pure Python mode, without it being
preparsed by the Sage preparser. Otherwise, nothing changes.
To use this, put %python by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
disable preparsing just for that line.
"""
salvus.execute(code, preparse=False)
def python3(code):
"""
Block decorator to run code in a pure Python3 mode session.
To use this, put %python3 by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using python3.
You can combine %python3 with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p3')
%python3
x = set([1,2,3])
print(x)
Afterwards, p3 contains the output '{1, 2, 3}' and the variable x
in the controlling Sage session is in no way impacted.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute python3 -E')(code)
def perl(code):
"""
Block decorator to run code in a Perl session.
To use this, put %perl by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using perl.
EXAMPLE:
A perl cell::
%perl
$apple_count = 5;
$count_report = "There are $apple_count apples.";
print "The report is: $count_report\n";
Or use %perl on one line::
%perl $apple_count = 5; $count_report = "There are $apple_count apples."; print "The report is: $count_report\n";
You can combine %perl with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p')
%perl print "hi"
Afterwards, p contains 'hi'.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute perl')(code)
def ruby(code):
"""
Block decorator to run code in a Ruby session.
To use this, put %ruby by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using ruby.
EXAMPLE:
A ruby cell::
%ruby
lang = "ruby"
print "Hello from #{lang}!"
Or use %ruby on one line::
%ruby lang = "ruby"; print "Hello from #{lang}!"
You can combine %ruby with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p')
%ruby lang = "ruby"; print "Hello from #{lang}!"
Afterwards, p contains 'Hello from ruby!'.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute ruby')(code)
def fortran(x, library_paths=[], libraries=[], verbose=False):
"""
Compile Fortran code and make it available to use.
INPUT:
- x -- a string containing code
Use this as a decorator. For example, put this in a cell and evaluate it::
%fortran
C FILE: FIB1.F
SUBROUTINE FIB(A,N)
C
C CALCULATE FIRST N FIBONACCI NUMBERS
C
INTEGER N
REAL*8 A(N)
DO I=1,N
IF (I.EQ.1) THEN
A(I) = 0.0D0
ELSEIF (I.EQ.2) THEN
A(I) = 1.0D0
ELSE
A(I) = A(I-1) + A(I-2)
ENDIF
ENDDO
END
C END FILE FIB1.F
In the next cell, evaluate this::
import numpy
n = numpy.array(range(10),dtype=float)
fib(n,int(10))
n
This will produce this output: array([ 0., 1., 1., 2., 3., 5., 8., 13., 21., 34.])
"""
import __builtin__
from sage.misc.temporary_file import tmp_dir
if len(x.splitlines()) == 1 and os.path.exists(x):
filename = x
x = open(x).read()
if filename.lower().endswith('.f90'):
x = '!f90\n' + x
from numpy import f2py
from random import randint
# Create everything in a temporary directory
mytmpdir = tmp_dir()
try:
old_cwd = os.getcwd()
os.chdir(mytmpdir)
old_import_path = os.sys.path
os.sys.path.append(mytmpdir)
name = "fortran_module_%s"%randint(0,2**64) # Python module name
# if the first line has !f90 as a comment, gfortran will
# treat it as Fortran 90 code
if x.startswith('!f90'):
fortran_file = name + '.f90'
else:
fortran_file = name + '.f'
s_lib_path = ""
s_lib = ""
for s in library_paths:
s_lib_path = s_lib_path + "-L%s "
for s in libraries:
s_lib = s_lib + "-l%s "%s
log = name + ".log"
extra_args = '--quiet --f77exec=sage-inline-fortran --f90exec=sage-inline-fortran %s %s >"%s" 2>&1'%(
s_lib_path, s_lib, log)
f2py.compile(x, name, extra_args = extra_args, source_fn=fortran_file)
log_string = open(log).read()
# f2py.compile() doesn't raise any exception if it fails.
# So we manually check whether the compiled file exists.
# NOTE: the .so extension is used except on Cygwin;
# it is used even on OS X, where .dylib might be expected.
soname = name
uname = os.uname()[0].lower()
if uname[:6] == "cygwin":
soname += '.dll'
else:
soname += '.so'
if not os.path.isfile(soname):
raise RuntimeError("failed to compile Fortran code:\n" + log_string)
if verbose:
print log_string
m = __builtin__.__import__(name)
finally:
os.sys.path = old_import_path
os.chdir(old_cwd)
try:
import shutil
shutil.rmtree(mytmpdir)
except OSError:
# This can fail for example over NFS
pass
for k, x in m.__dict__.iteritems():
if k[0] != '_':
salvus.namespace[k] = x
def sh(code):
"""
Run a bash script in Salvus.
EXAMPLES:
Use as a block decorator on a single line::
%sh pwd
and multiline
%sh
echo "hi"
pwd
ls -l
You can also just directly call it::
sh('pwd')
The output is printed. To capture it, use capture
%capture(stdout='output')
%sh pwd
After that, the variable output contains the current directory.
"""
return script('/bin/bash')(code)
# Monkey patch the R interpreter interface to support graphics, when
# used as a decorator.
import sage.interfaces.r
def r_eval0(*args, **kwds):
return sage.interfaces.r.R.eval(sage.interfaces.r.r, *args, **kwds).strip('\n')
_r_plot_options = ''
def set_r_plot_options(width=7, height=7):
global _r_plot_options
_r_plot_options = ", width=%s, height=%s"%(width, height)
r_dev_on = False
def r_eval(code, *args, **kwds):
"""
Run a block of R code.
EXAMPLES::
sage: print r.eval("summary(c(1,2,3,111,2,3,2,3,2,5,4))") # outputs a string
Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 2.00 3.00 12.55 3.50 111.00
In the notebook, you can put %r at the top of a cell, or type "%default_mode r" into
a cell to set the whole worksheet to r mode.
NOTE: Any plots drawn using the plot command should "just work", without having
to mess with special devices, etc.
"""
# Only use special graphics support when using r as a cell decorator, since it has
# a 10ms penalty (factor of 10 slowdown) -- which doesn't matter for interactive work, but matters
# a lot if one had a loop with r.eval in it.
if sage.interfaces.r.r not in salvus.code_decorators:
return r_eval0(code, *args, **kwds)
global r_dev_on
if r_dev_on:
return r_eval0(code, *args, **kwds)
try:
r_dev_on = True
tmp = '/tmp/' + uuid() + '.svg'
r_eval0("svg(filename='%s'%s)"%(tmp, _r_plot_options))
s = r_eval0(code, *args, **kwds)
r_eval0('dev.off()')
return s
finally:
r_dev_on = False
if os.path.exists(tmp):
salvus.stdout('\n'); salvus.file(tmp, show=True); salvus.stdout('\n')
os.unlink(tmp)
sage.interfaces.r.r.eval = r_eval
sage.interfaces.r.r.set_plot_options = set_r_plot_options
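# A minimal usage sketch for the patched %r cell decorator (the R code is an arbitrary example):
#   %r
#   x <- rnorm(100)
#   hist(x)       # the plot is captured via the svg device opened in r_eval above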
def prun(code):
"""
Use %prun followed by a block of code to profile execution of that
code. This will display the resulting profile, along with a menu
to select how to sort the data.
EXAMPLES:
Profile computing a tricky integral (on a single line)::
%prun integrate(sin(x^2),x)
Profile a block of code::
%prun
E = EllipticCurve([1..5])
v = E.anlist(10^5)
r = E.rank()
"""
import cProfile, pstats
from sage.misc.all import tmp_filename
filename = tmp_filename()
cProfile.runctx(salvus.namespace['preparse'](code), salvus.namespace, locals(), filename)
@interact
def f(title = text_control('', "<h1>Salvus Profiler</h1>"),
sort=("First sort by", selector([('calls', 'number of calls to the function'),
('time', ' total time spent in the function'),
('cumulative', 'total time spent in this and all subfunctions (from invocation till exit)'),
('module', 'name of the module that contains the function'),
('name', 'name of the function')
], width="100%", default='time')),
strip_dirs=True):
try:
p = pstats.Stats(filename)
if strip_dirs:
p.strip_dirs()
p.sort_stats(sort)
p.print_stats()
except Exception, msg:
print msg
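# A plain-Python sketch of the cProfile/pstats pattern that %prun wraps above.
# The helper is defined but never called here, and the profiled expression is an
# arbitrary example.
def _prun_sketch():
    import cProfile, pstats, tempfile
    filename = tempfile.mktemp()
    # profile an arbitrary expression and dump the stats to a file
    cProfile.run("sum(i*i for i in xrange(10**5))", filename)
    # load the stats, sort by internal time, and show the top few entries
    pstats.Stats(filename).strip_dirs().sort_stats('time').print_stats(5)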
##############################################################
# The %fork cell decorator.
##############################################################
def _wait_in_thread(pid, callback, filename):
from sage.structure.sage_object import load
def wait():
try:
os.waitpid(pid,0)
callback(load(filename))
except Exception, msg:
callback(msg)
from threading import Thread
t = Thread(target=wait, args=tuple([]))
t.start()
def async(f, args, kwds, callback):
"""
Run f in a forked subprocess with given args and kwds, then call the
callback function when f terminates.
"""
from sage.misc.all import tmp_filename
filename = tmp_filename() + '.sobj'
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid:
# The parent master process
try:
_wait_in_thread(pid, callback, filename)
return pid
finally:
if os.path.exists(filename):
os.unlink(filename)
else:
# The child process
try:
result = f(*args, **kwds)
except Exception, msg:
result = str(msg)
from sage.structure.sage_object import save
save(result, filename)
os._exit(0)
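# A minimal usage sketch for async (the function, arguments and callback are made-up
# examples; the result must be picklable by Sage's save/load for the callback to receive it):
#   def slow_square(n): return n*n
#   def report(result): print "forked result:", result
#   pid = async(slow_square, (12,), {}, report)   # forks; report(144) runs later in a thread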
class Fork(object):
"""
The %fork block decorator evaluates its code in a forked subprocess
that does not block the main process.
You may still use the @fork function decorator from Sage, as usual,
to run a function in a subprocess. Type "sage.all.fork?" to see
the help for the @fork decorator.
WARNING: This is highly experimental and possibly flaky. Use with
caution.
All (picklable) global variables that are set in the forked
subprocess are set in the parent when the forked subprocess
terminates. However, the forked subprocess has no other side
effects, except what it might do to file handles and the
filesystem.
To see currently running forked subprocesses, type
fork.children(), which returns a dictionary {pid:execute_uuid}.
To kill a given subprocess and stop the cell waiting for input,
type fork.kill(pid). This is currently the only way to stop code
running in %fork cells.
TODO/WARNING: The subprocesses spawned by fork are not killed
if the parent process is killed first!
NOTE: All pexpect interfaces are reset in the child process.
"""
def __init__(self):
self._children = {}
def children(self):
return dict(self._children)
def __call__(self, s):
if isinstance(s, types.FunctionType): # check for decorator usage
import sage.parallel.decorate
return sage.parallel.decorate.fork(s)
salvus._done = False
id = salvus._id
changed_vars = set([])
def change(var, val):
changed_vars.add(var)
def f():
# Run some commands to tell Sage that its
# pid has changed.
import sage.misc.misc
reload(sage.misc.misc)
# The pexpect interfaces (and objects defined in them) are
# not valid.
sage.interfaces.quit.invalidate_all()
salvus.namespace.on('change', None, change)
salvus.execute(s)
result = {}
from sage.structure.sage_object import dumps
for var in changed_vars:
try:
result[var] = dumps(salvus.namespace[var])
except:
result[var] = 'unable to pickle %s'%var
return result
from sage.structure.sage_object import loads
def g(s):
if isinstance(s, Exception):
sys.stderr.write(str(s))
sys.stderr.flush()
else:
for var, val in s.iteritems():
try:
salvus.namespace[var] = loads(val)
except:
print "unable to unpickle %s"%var
salvus._conn.send_json({'event':'output', 'id':id, 'done':True})
if pid in self._children:
del self._children[pid]
pid = async(f, tuple([]), {}, g)
print "Forked subprocess %s"%pid
self._children[pid] = id
def kill(self, pid):
if pid in self._children:
salvus._conn.send_json({'event':'output', 'id':self._children[pid], 'done':True})
os.kill(pid, 9)
del self._children[pid]
else:
raise ValueError, "Unknown pid = (%s)"%pid
fork = Fork()
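# A minimal usage sketch for the %fork cell decorator (the computation is an arbitrary example):
#   %fork
#   big = factor(2^256 + 1)   # runs in a subprocess; 'big' is copied back to the
#                             # parent namespace when the subprocess finishes
#   fork.children()           # {pid: cell id} of currently running forked cells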
####################################################
# Display of 2d/3d graphics objects
####################################################
from sage.misc.all import tmp_filename
from sage.plot.animate import Animation
import matplotlib.figure
def show_animation(obj, delay=20, gif=False, **kwds):
if gif:
t = tmp_filename(ext='.gif')
obj.gif(delay, t, **kwds)
salvus.file(t, raw=False)
os.unlink(t)
else:
t = tmp_filename(ext='.webm')
obj.ffmpeg(t, delay=delay, **kwds)
salvus.file(t, raw=True) # and let delete when worksheet ends - need this so can replay video.
def show_2d_plot_using_matplotlib(obj, svg, **kwds):
if isinstance(obj, matplotlib.image.AxesImage):
# The result of imshow, e.g.,
#
# from matplotlib import numpy, pyplot
# pyplot.imshow(numpy.random.random_integers(255, size=(100,100,3)))
#
t = tmp_filename(ext='.png')
obj.write_png(t)
salvus.file(t)
os.unlink(t)
return
if isinstance(obj, matplotlib.axes.Axes):
obj = obj.get_figure()
if 'events' in kwds:
from graphics import InteractiveGraphics
ig = InteractiveGraphics(obj, **kwds['events'])
n = '__a'+uuid().replace('-','') # so it doesn't get garbage collected instantly.
obj.__setattr__(n, ig)
kwds2 = dict(kwds)
del kwds2['events']
ig.show(**kwds2)
else:
t = tmp_filename(ext = '.svg' if svg else '.png')
if isinstance(obj, matplotlib.figure.Figure):
obj.savefig(t, **kwds)
else:
obj.save(t, **kwds)
salvus.file(t)
os.unlink(t)
def show_3d_plot_using_tachyon(obj, **kwds):
t = tmp_filename(ext = '.png')
obj.save(t, **kwds)
salvus.file(t)
os.unlink(t)
def show_graph_using_d3(obj, **kwds):
salvus.d3_graph(obj, **kwds)
def plot3d_using_matplotlib(expr, rangeX, rangeY,
density=40, elev=45., azim=35.,
alpha=0.85, cmap=None):
"""
Plots a symbolic expression in two variables on a two dimensional grid
and renders the function using matplotlib's 3D projection.
The purpose is to make it possible to create vector images (PDF, SVG)
suitable for high-resolution publication figures -- instead of rasterized image formats.
Example::
%var x y
plot3d_using_matplotlib(x^2 + (1-y^2), (x, -5, 5), (y, -5, 5))
Arguments::
* expr: symbolic expression, e.g. x^2 - (1-y)^2
* rangeX: triple: (variable, minimum, maximum), e.g. (x, -10, 10)
* rangeY: like rangeX
* density: grid density
* elev: elevation, e.g. 45
* azim: azimuth, e.g. 35
* alpha: alpha transparency of plot (default: 0.85)
* cmap: matplotlib colormap, e.g. matplotlib.cm.Blues (default)
"""
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import numpy as np
cmap = cmap or cm.Blues
plt.cla()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=elev, azim=azim)
xx = np.linspace(rangeX[1], rangeX[2], density)
yy = np.linspace(rangeY[1], rangeY[2], density)
X, Y = np.meshgrid(xx, yy)
import numpy as np
exprv = np.vectorize(lambda x1, x2 : \
float(expr.subs({rangeX[0] : x1, rangeY[0] : x2})))
Z = exprv(X, Y)
zlim = np.min(Z), np.max(Z)
ax.plot_surface(X, Y, Z, alpha=alpha, cmap=cmap, linewidth=.5,
shade=True,
rstride=int(len(xx)/10),
cstride=int(len(yy)/10))
ax.set_xlabel('X')
ax.set_xlim(*rangeX[1:])
ax.set_ylabel('Y')
ax.set_ylim(*rangeY[1:])
ax.set_zlabel('Z')
ax.set_zlim(*zlim)
plt.show()
from sage.plot.graphics import Graphics, GraphicsArray
from sage.plot.plot3d.base import Graphics3d
import cgi
def show(*objs, **kwds):
"""
Show a 2d or 3d graphics object (or objects), animation, or matplotlib figure, or show an
expression typeset nicely using LaTeX.
- display: (default: True); if True, use display math for expression (big and centered).
- svg: (default: True); if True, show 2d plots using svg (otherwise use png)
- d3: (default: True); if True, show graphs (vertices and edges) using an interactive D3 viewer
for the many options for this viewer, type 'import graphics; graphics.graph_to_d3_jsonable?'
If false, graphs are converted to plots and displayed as usual.
- renderer: (default: 'webgl'); for 3d graphics
- 'webgl' (fastest) using hardware accelerated 3d;
- 'canvas' (slower) using a 2d canvas, but may work better with transparency;
- 'tachyon' -- a ray traced static image.
- spin: (default: False); spins 3d plot, with number determining speed (requires mouse over plot)
- events: if given, {'click':foo, 'mousemove':bar}; each time the user clicks,
the function foo is called with a 2-tuple (x,y) where they clicked. Similarly
for mousemove. This works for Sage 2d graphics and matplotlib figures.
ANIMATIONS:
- animations are by default encoded and displayed using an efficiently web-friendly
format (currently webm, which is **not supported** by Safari or IE).
- ``delay`` - integer (default: 20); delay in hundredths of a
second between frames.
- gif=False -- if you set gif=True, instead use an animated gif,
which is much less efficient, but works on all browsers.
You can also use options directly to the animate command, e.g., the figsize option below:
a = animate([plot(sin(x + a), (x, 0, 2*pi)) for a in [0, pi/4, .., 2*pi]], figsize=6)
show(a, delay=30)
EXAMPLES:
Some examples:
show(2/3)
show([1, 4/5, pi^2 + e], 1+pi)
show(x^2, display=False)
show(e, plot(sin))
Here's an example that illustrates creating a clickable image with events::
@interact
def f0(fun=x*sin(x^2), mousemove='', click='(0,0)'):
click = sage_eval(click)
g = plot(fun, (x,0,5), zorder=0) + point(click, color='red', pointsize=100, zorder=10)
ymax = g.ymax(); ymin = g.ymin()
m = fun.derivative(x)(x=click[0])
b = fun(x=click[0]) - m*click[0]
g += plot(m*x + b, (click[0]-1,click[0]+1), color='red', zorder=10)
def h(p):
f0.mousemove = p
def c(p):
f0(click=p)
show(g, events={'click':c, 'mousemove':h}, svg=True, gridlines='major', ymin=ymin, ymax=ymax)
"""
# svg=True, d3=True,
svg = kwds.get('svg',True)
d3 = kwds.get('d3',True)
display = kwds.get('display', True)
for t in ['svg', 'd3', 'display']:
if t in kwds:
del kwds[t]
import graphics
def show0(obj, combine_all=False):
# Either show the object and return None or
# return a string of html to represent obj.
if isinstance(obj, (Graphics, GraphicsArray, matplotlib.figure.Figure, matplotlib.axes.Axes, matplotlib.image.AxesImage)):
show_2d_plot_using_matplotlib(obj, svg=svg, **kwds)
elif isinstance(obj, Animation):
show_animation(obj, **kwds)
elif isinstance(obj, Graphics3d):
if kwds.get('viewer') == 'tachyon':
show_3d_plot_using_tachyon(obj, **kwds)
else:
salvus.threed(obj, **kwds)
# graphics.show_3d_plot_using_threejs(obj, **kwds)
elif isinstance(obj, (sage.graphs.graph.Graph, sage.graphs.digraph.DiGraph)):
if d3:
show_graph_using_d3(obj, **kwds)
else:
show(obj.plot(), **kwds)
elif isinstance(obj, str):
return obj
elif isinstance(obj, (list, tuple)):
v = []
for a in obj:
b = show0(a)
if b is not None:
v.append(b)
if combine_all:
return ' '.join(v)
s = ', '.join(v)
if isinstance(obj, list):
return '[%s]'%s
else:
return '(%s)'%s
else:
if display:
return "$\\displaystyle %s$"%sage.misc.latex.latex(obj)
else:
return "$%s$"%sage.misc.latex.latex(obj)
s = show0(objs, combine_all=True)
if s is not None:
if display:
salvus.html("<div align='center'>%s</div>"%cgi.escape(s))
else:
salvus.html("<div>%s</div>"%cgi.escape(s))
# Make it so plots plot themselves correctly when they call their repr.
Graphics.show = show
GraphicsArray.show = show
Animation.show = show
# Very "evil" abuse of the display manager, so sphere().show() works:
try:
from sage.repl.rich_output import get_display_manager
get_display_manager().display_immediately = show
except:
# so doesn't crash on older versions of Sage.
pass
###################################################
# %auto -- automatically evaluate a cell on load
###################################################
def auto(s):
"""
The %auto decorator sets a cell so that it will be automatically
executed when the Sage process first starts.
Thus %auto allows you to initialize functions, variables, interacts,
etc., e.g., when loading a worksheet.
NOTE: The %auto decorator just calls salvus.auto(True), which sets
a cell metatag. You *must* execute the cell containing %auto at
least once in order for it to work.
"""
salvus.auto(True)
return s # the do-nothing block decorator.
def hide(component='input'):
"""
Hide a component of a cell. By default, hide hides the code
editor part of the cell, but you can hide other parts by passing
in an optional argument:
'input', 'output'
Use the cell.show(...) function to reveal a cell component.
"""
if component not in ['input', 'output']:
# Allow %hide to work, for compatibility with sagenb.
hide('input')
return component
cell.hide(component)
def hideall(code=None):
cell.hideall()
if code is not None: # for backwards compat with sagenb
return code
##########################################################
# A "%exercise" cell mode -- a first step toward
# automated homework.
##########################################################
class Exercise:
def __init__(self, question, answer, check=None, hints=None):
import sage.all
from sage.structure.element import is_Matrix
if not (isinstance(answer, (tuple, list)) and len(answer) == 2):
if is_Matrix(answer):
default = sage.all.parent(answer)(0)
else:
default = ''
answer = [answer, default]
if check is None:
R = sage.all.parent(answer[0])
def check(attempt):
return R(attempt) == answer[0]
if hints is None:
hints = ['','','',"The answer is %s."%answer[0]]
self._question = question
self._answer = answer
self._check = check
self._hints = hints
def _check_attempt(self, attempt, interact):
from sage.misc.all import walltime
response = "<div class='well'>"
try:
r = self._check(attempt)
if isinstance(r, tuple) and len(r)==2:
correct = r[0]
comment = r[1]
else:
correct = bool(r)
comment = ''
except TypeError, msg:
response += "<h3 style='color:darkgreen'>Huh? -- %s (attempt=%s)</h3>"%(msg, attempt)
else:
if correct:
response += "<h1 style='color:blue'>RIGHT!</h1>"
if self._start_time:
response += "<h2 class='lighten'>Time: %.1f seconds</h2>"%(walltime()-self._start_time,)
if self._number_of_attempts == 1:
response += "<h3 class='lighten'>You got it first try!</h3>"
else:
response += "<h3 class='lighten'>It took you %s attempts.</h3>"%(self._number_of_attempts,)
else:
response += "<h3 style='color:darkgreen'>Not correct yet...</h3>"
if self._number_of_attempts == 1:
response += "<h4 style='lighten'>(first attempt)</h4>"
else:
response += "<h4 style='lighten'>(%s attempts)</h4>"%self._number_of_attempts
if self._number_of_attempts > len(self._hints):
hint = self._hints[-1]
else:
hint = self._hints[self._number_of_attempts-1]
if hint:
response += "<span class='lighten'>(HINT: %s)</span>"%(hint,)
if comment:
response += '<h4>%s</h4>'%comment
response += "</div>"
interact.feedback = text_control(response,label='')
return correct
def ask(self, cb):
from sage.misc.all import walltime
self._start_time = walltime()
self._number_of_attempts = 0
attempts = []
@interact(layout=[[('question',12)],[('attempt',12)], [('feedback',12)]])
def f(question = ("<b>Question:</b>", text_control(self._question)),
attempt = ('<b>Answer:</b>',self._answer[1])):
if 'attempt' in interact.changed() and attempt != '':
attempts.append(attempt)
if self._start_time == 0:
self._start_time = walltime()
self._number_of_attempts += 1
if self._check_attempt(attempt, interact):
cb({'attempts':attempts, 'time':walltime()-self._start_time})
def exercise(code):
r"""
Use the %exercise cell decorator to create interactive exercise
sets. Put %exercise at the top of the cell, then write Sage code
in the cell that defines the following (all are optional):
- a ``question`` variable, as an HTML string with math in dollar
signs
- an ``answer`` variable, which can be any object, or a pair
(correct_value, interact control) -- see the docstring for
interact for controls.
- an optional callable ``check(answer)`` that returns a boolean or
a 2-tuple
(True or False, message),
where the first argument is True if the answer is correct, and
the optional second argument is a message that should be
displayed in response to the given answer. NOTE: Often the
input "answer" will be a string, so you may have to use Integer,
RealNumber, or sage_eval to evaluate it, depending
on what you want to allow the user to do.
- hints -- optional list of strings to display in sequence each
time the user enters a wrong answer. The last string is
displayed repeatedly. If hints is omitted, the correct answer
is displayed after three attempts.
NOTE: The code that defines the exercise is executed so that it
does not impact (and is not impacted by) the global scope of your
variables elsewhere in your session. Thus you can have many
%exercise cells in a single worksheet with no interference between
them.
The following examples further illustrate how %exercise works.
An exercise to test your ability to sum the first $n$ integers::
%exercise
title = "Sum the first n integers, like Gauss did."
n = randint(3, 100)
question = "What is the sum $1 + 2 + \\cdots + %s$ of the first %s positive integers?"%(n,n)
answer = n*(n+1)//2
Transpose a matrix::
%exercise
title = r"Transpose a $2 \times 2$ Matrix"
A = random_matrix(ZZ,2)
question = "What is the transpose of $%s?$"%latex(A)
answer = A.transpose()
Add together a few numbers::
%exercise
k = randint(2,5)
title = "Add %s numbers"%k
v = [randint(1,10) for _ in range(k)]
question = "What is the sum $%s$?"%(' + '.join([str(x) for x in v]))
answer = sum(v)
The trace of a matrix::
%exercise
title = "Compute the trace of a matrix."
A = random_matrix(ZZ, 3, x=-5, y = 5)^2
question = "What is the trace of $$%s?$$"%latex(A)
answer = A.trace()
Some basic arithmetic with hints and dynamic feedback::
%exercise
k = randint(2,5)
title = "Add %s numbers"%k
v = [randint(1,10) for _ in range(k)]
question = "What is the sum $%s$?"%(' + '.join([str(x) for x in v]))
answer = sum(v)
hints = ['This is basic arithmetic.', 'The sum is near %s.'%(answer+randint(1,5)), "The answer is %s."%answer]
def check(attempt):
c = Integer(attempt) - answer
if c == 0:
return True
if abs(c) >= 10:
return False, "Gees -- not even close!"
if c < 0:
return False, "too low"
if c > 0:
return False, "too high"
"""
f = closure(code)
def g():
x = f()
return x.get('title',''), x.get('question', ''), x.get('answer',''), x.get('check',None), x.get('hints',None)
title, question, answer, check, hints = g()
obj = {}
obj['E'] = Exercise(question, answer, check, hints)
obj['title'] = title
def title_control(t):
return text_control('<h3 class="lighten">%s</h3>'%t)
the_times = []
@interact(layout=[[('go',1), ('title',11,'')],[('')], [('times',12, "<b>Times:</b>")]], flicker=True)
def h(go = button(" "*5 + "Go" + " "*7, label='', icon='fa-refresh', classes="btn-large btn-success"),
title = title_control(title),
times = text_control('')):
c = interact.changed()
if 'go' in c or 'another' in c:
interact.title = title_control(obj['title'])
def cb(obj):
the_times.append("%.1f"%obj['time'])
h.times = ', '.join(the_times)
obj['E'].ask(cb)
title, question, answer, check, hints = g() # get ready for next time.
obj['title'] = title
obj['E'] = Exercise(question, answer, check, hints)
def closure(code):
"""
Wrap the given code block (a string) in a closure, i.e., a
function with an obfuscated random name.
When called, the function returns locals().
"""
import uuid
# TODO: strip string literals first
code = ' ' + ('\n '.join(code.splitlines()))
fname = "__" + str(uuid.uuid4()).replace('-','_')
closure = "def %s():\n%s\n return locals()"%(fname, code)
class Closure:
def __call__(self):
return self._f()
c = Closure()
salvus.execute(closure)
c._f = salvus.namespace[fname]
del salvus.namespace[fname]
return c
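# A minimal usage sketch for closure (the code string is an arbitrary example; the
# returned dict of locals is shown as a rough expectation, not an exact guarantee):
#   c = closure("a = 2 + 3\nb = a^2")
#   c()    # -> e.g. {'a': 5, 'b': 25}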
#########################################
# Dynamic variables (linked to controls)
#########################################
def _dynamic(var, control=None):
if control is None:
control = salvus.namespace.get(var,'')
@interact(layout=[[(var,12)]], output=False)
def f(x=(var,control)):
salvus.namespace.set(var, x, do_not_trigger=[var])
def g(y):
f.x = y
salvus.namespace.on('change', var, g)
if var in salvus.namespace:
x = salvus.namespace[var]
def dynamic(*args, **kwds):
"""
Make variables in the global namespace dynamically linked to a control from the
interact label (see the documentation for interact).
EXAMPLES:
Make a control linked to a variable that doesn't yet exist::
dynamic('xyz')
Make a slider and a selector, linked to t and x::
dynamic(t=(1..10), x=[1,2,3,4])
t = 5 # this changes the control
"""
for var in args:
if not isinstance(var, str):
i = id(var)
for k,v in salvus.namespace.iteritems():
if id(v) == i:
_dynamic(k)
return
else:
_dynamic(var)
for var, control in kwds.iteritems():
_dynamic(var, control)
import sage.all
def var0(*args, **kwds):
if len(args)==1:
name = args[0]
else:
name = args
G = salvus.namespace
v = sage.all.SR.var(name, **kwds)
if isinstance(v, tuple):
for x in v:
G[repr(x)] = x
else:
G[repr(v)] = v
return v
def var(*args, **kwds):
"""
Create symbolic variables and inject them into the global namespace.
NOTE: In SageMathCloud, you can use var as a line decorator::
%var x
%var a,b,theta # separate with commas
%var x y z t # separate with spaces
Use latex_name to customize how the variable is typeset:
var1 = var('var1', latex_name=r'\sigma^2_1')
show(e^(var1**2))
Multicolored variables made using the %var line decorator:
%var(latex_name=r"\color{green}{\theta}") theta
%var(latex_name=r"\color{red}{S_{u,i}}") sui
show(expand((sui + x^3 + theta)^2))
Here is the docstring for var in Sage:
"""
if 'latex_name' in kwds:
# wrap with braces -- sage should probably do this, but whatever.
kwds['latex_name'] = '{%s}'%kwds['latex_name']
if len(args) > 0:
return var0(*args, **kwds)
else:
def f(s):
return var0(s, *args, **kwds)
return f
var.__doc__ += sage.all.var.__doc__
#############################################
# Variable reset -- we have to rewrite
# this because of all the monkey patching
# that we do.
#############################################
import sage.misc.reset
def reset(vars=None, attached=False):
"""
If vars is specified, just restore the value of vars and leave
all other variables alone. In SageMathCloud, you can also use
reset as a line decorator::
%reset x, pi, sin # comma-separated
%reset x pi sin # commas are optional
If vars is not given, delete all user-defined variables, reset
all global variables back to their default states, and reset
all interfaces to other computer algebra systems.
Original reset docstring::
"""
if vars is not None:
restore(vars)
return
G = salvus.namespace
T = type(sys) # module type
for k in G.keys():
if k[0] != '_' and type(k) != T:
try:
del G[k]
except KeyError:
pass
restore()
from sage.symbolic.assumptions import forget; forget()
sage.misc.reset.reset_interfaces()
if attached:
sage.misc.reset.reset_attached()
reset.__doc__ += sage.misc.reset.reset.__doc__
def restore(vars=None):
""
if isinstance(vars, unicode):
vars = str(vars) # sage.misc.reset is unicode ignorant
if ',' in vars: # sage.misc.reset is stupid about commas and space -- TODO: make a patch to sage
vars = [v.strip() for v in vars.split(',')]
import sage.calculus.calculus
sage.misc.reset._restore(salvus.namespace, default_namespace, vars)
sage.misc.reset._restore(sage.calculus.calculus.syms_cur, sage.calculus.calculus.syms_default, vars)
restore.__doc__ += sage.misc.reset.restore.__doc__
# NOTE: this is not used anymore
def md2html(s):
from markdown2Mathjax import sanitizeInput, reconstructMath
from markdown2 import markdown
delims = [('\\(','\\)'), ('$$','$$'), ('\\[','\\]'),
('\\begin{equation}', '\\end{equation}'), ('\\begin{equation*}', '\\end{equation*}'),
('\\begin{align}', '\\end{align}'), ('\\begin{align*}', '\\end{align*}'),
('\\begin{eqnarray}', '\\end{eqnarray}'), ('\\begin{eqnarray*}', '\\end{eqnarray*}'),
('\\begin{math}', '\\end{math}'),
('\\begin{displaymath}', '\\end{displaymath}')
]
tmp = [((s,None),None)]
for d in delims:
tmp.append((sanitizeInput(tmp[-1][0][0], equation_delims=d), d))
extras = ['code-friendly', 'footnotes', 'smarty-pants', 'wiki-tables']
markedDownText = markdown(tmp[-1][0][0], extras=extras)
while len(tmp) > 1:
markedDownText = reconstructMath(markedDownText, tmp[-1][0][1], equation_delims=tmp[-1][1])
del tmp[-1]
return markedDownText
# NOTE: this is not used anymore
class Markdown(object):
r"""
Cell mode that renders everything after %md as markdown and hides the input by default.
EXAMPLES::
---
%md
# A Title
## A subheading
---
%md(hide=False)
# A title
- a list
---
md("# A title", hide=False)
---
%md(hide=False) `some code`
This uses the Python markdown2 library with the following
extras enabled:
'code-friendly', 'footnotes',
'smarty-pants', 'wiki-tables'
See https://github.com/trentm/python-markdown2/wiki/Extras
We also use markdown2Mathjax so that LaTeX will be properly
typeset if it is wrapped in $'s and $$'s, \(, \), \[, \],
\begin{equation}, \end{equation}, \begin{align}, \end{align}, etc.
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return Markdown(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
html(md2html(s),hide=hide)
# not used
#md = Markdown()
# Instead... of the above server-side markdown, we use this client-side markdown.
class Marked(object):
r"""
Cell mode that renders everything after %md as Github flavored
markdown [1] with mathjax and hides the input by default.
[1] https://help.github.com/articles/github-flavored-markdown
The rendering is done client-side using marked and mathjax.
EXAMPLES::
---
%md
# A Title
## A subheading
---
%md(hide=False)
# A title
- a list
---
md("# A title", hide=False)
---
%md(hide=False) `some code`
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return Marked(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
if hide:
salvus.hide('input')
salvus.md(s)
md = Marked()
#####
## Raw Input
def raw_input(prompt='', default='', placeholder='', input_width=None, label_width=None, type=None):
"""
Read a string from the user in the worksheet interface to Sage.
INPUTS:
- prompt -- (default: '') a label to the left of the input
- default -- (default: '') default value to put in input box
- placeholder -- (default: '') default placeholder to put in grey when input box empty
- input_width -- (default: None) css that gives the width of the input box
- label_width -- (default: None) css that gives the width of the label
- type -- (default: None) if not given, returns a unicode string representing the exact user input.
Other options include:
- type='sage' -- will evaluate it to a sage expression in the global scope.
- type=anything that can be called, e.g., type=int, type=float.
OUTPUT:
- By default, returns a **unicode** string (not a normal Python str). However, can be customized
by changing the type.
EXAMPLE:
print salvus.raw_input("What is your full name?", default="Sage Math", input_width="20ex", label_width="15ex")
"""
return salvus.raw_input(prompt=prompt, default=default, placeholder=placeholder, input_width=input_width, label_width=label_width, type=type)
#####
## Clear
def clear():
"""
Clear the output of the current cell. You can use this to
dynamically animate the output of a cell using a for loop.
SEE ALSO: delete_last_output
"""
salvus.clear()
def delete_last_output():
"""
Delete the last output message.
SEE ALSO: clear
"""
salvus.delete_last_output()
#####
# Generic Pandoc cell decorator
def pandoc(fmt, doc=None, hide=True):
"""
INPUT:
- fmt -- one of 'docbook', 'haddock', 'html', 'json', 'latex', 'markdown', 'markdown_github',
'markdown_mmd', 'markdown_phpextra', 'markdown_strict', 'mediawiki',
'native', 'opml', 'rst', 'textile'
- doc -- a string in the given format
OUTPUT:
- Called directly, you get the HTML rendered version of doc as a string.
- If you use this as a cell decorator, it displays the HTML output, e.g.,
%pandoc('mediawiki')
* ''Unordered lists'' are easy to do:
** Start every line with a star.
*** More stars indicate a deeper level.
"""
if doc is None:
return lambda x : html(pandoc(fmt, x), hide=hide) if x is not None else ''
import subprocess
p = subprocess.Popen(['pandoc', '-f', fmt, '--mathjax'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
if not isinstance(doc, unicode):
doc = unicode(doc, 'utf8')
p.stdin.write(doc.encode('UTF-8'))
p.stdin.close()
err = p.stderr.read()
if err:
raise RuntimeError(err)
return p.stdout.read()
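# A minimal sketch of calling pandoc directly rather than as a cell decorator
# (the markup string is an arbitrary example):
#   s = pandoc('markdown_github', doc='# Title\n\n* a bullet with math $x^2$')
#   html(s)    # render the returned HTML in the output cell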
def wiki(doc=None, hide=True):
"""
Mediawiki markup cell decorator. E.g.,
EXAMPLE::
%wiki(hide=False)
* ''Unordered lists'' and math like $x^3 - y^2$ are both easy
** Start every line with a star.
*** More stars indicate a deeper level. """
if doc is None:
return lambda doc: wiki(doc=doc, hide=hide) if doc else ''
html(pandoc('mediawiki', doc=doc), hide=hide)
mediawiki = wiki
######
def load_html_resource(filename):
fl = filename.lower()
if fl.startswith('http://') or fl.startswith('https://'):
# remote url
url = fl
else:
# local file
url = salvus.file(filename, show=False)
ext = os.path.splitext(filename)[1][1:].lower()
if ext == "css":
salvus.javascript('''$.get("%s", function(css) { $('<style type=text/css></style>').html(css).appendTo("body")});'''%url)
elif ext == "html":
# TODO: opts.element should change to cell.element when more canonical (need to finish some code in syncdoc)!
salvus.javascript('opts.element.append($("<div>").load("%s"))'%url)
elif ext == "coffee":
salvus.javascript('$.ajax({url:"%s"}).done(function(data) { eval(CoffeeScript.compile(data)); })'%url)
elif ext == "js":
salvus.html('<script src="%s"></script>'%url)
# Monkey-patched the load command
def load(*args, **kwds):
"""
Load Sage object from the file with name filename, which will have
an .sobj extension added if it doesn't have one. Or, if the input
is a filename ending in .py, .pyx, or .sage, load that file into
the current running session. Loaded files are not loaded into
their own namespace, i.e., this is much more like Python's
"execfile" than Python's "import".
You may also load an sobj or execute a code file available on the web
by specifying the full URL to the file. (Set ``verbose = False`` to
suppress the download progress indicator.)
INPUT:
- args -- any number of filename strings with any of the following extensions:
.sobj, .sage, .py, .pyx, .html, .css, .js, .coffee, .pdf
- ``verbose`` -- (default: True) show a progress indicator when loading a file over the network.
If you load any of the web types (.html, .css, .js, .coffee), they are loaded
into the web browser DOM (or Javascript session), not the Python process.
If you load a pdf, it is displayed in the output of the worksheet. The extra
options are passed to salvus.pdf -- see the docstring for that.
In SageMathCloud you may also use load as a decorator, with filenames separated
by whitespace or commas::
%load foo.sage bar.py a.pyx, b.pyx
The following are all valid ways to use load::
%load a.html
%load a.css
%load a.js
%load a.coffee
%load a.css a.js a.coffee a.html
load('a.css', 'a.js', 'a.coffee', 'a.html')
load('a.css a.js a.coffee a.html')
load(['a.css', 'a.js', 'a.coffee', 'a.html'])
ALIAS: %runfile is the same as %load, for compatibility with IPython.
"""
if len(args) == 1:
if isinstance(args[0], (unicode,str)):
args = tuple(args[0].replace(',',' ').split())
if isinstance(args[0], (list, tuple)):
args = args[0]
if len(args) == 0 and len(kwds) == 1:
# This supports
# %load(verbose=False) a.sage
# which doesn't really matter right now, since there is a bug in Sage's own
# load command, where it isn't verbose for network code, but is for objects.
def f(*args):
return load(*args, **kwds)
return f
t = '__tmp__'; i=0
while t+str(i) in salvus.namespace:
i += 1
t += str(i)
# First handle HTML related args -- these are all very oriented toward cloud.sagemath worksheets
html_extensions = set(['js','css','coffee','html'])
other_args = []
for arg in args:
i = arg.rfind('.')
if i != -1 and arg[i+1:].lower() in html_extensions:
load_html_resource(arg)
elif i != -1 and arg[i+1:].lower() == 'pdf':
show_pdf(arg, **kwds)
else:
other_args.append(arg)
# pdf?
for arg in args:
i = arg.find('.')
# now handle remaining non-web arguments.
if len(other_args) > 0:
try:
exec 'salvus.namespace["%s"] = sage.structure.sage_object.load(*__args, **__kwds)'%t in salvus.namespace, {'__args':other_args, '__kwds':kwds}
return salvus.namespace[t]
finally:
try:
del salvus.namespace[t]
except: pass
# add alias, due to IPython.
runfile = load
## Make it so pylab (matplotlib) figures display, at least using pylab.show
import pylab
def _show_pylab(svg=True):
"""
Show a Pylab plot in a Sage Worksheet.
INPUTS:
- svg -- boolean (default: True); if True use an svg; otherwise, use a png.
"""
try:
ext = '.svg' if svg else '.png'
filename = uuid() + ext
pylab.savefig(filename)
salvus.file(filename)
finally:
try:
os.unlink(filename)
except:
pass
pylab.show = _show_pylab
matplotlib.figure.Figure.show = show
import matplotlib.pyplot
def _show_pyplot(svg=True):
"""
Show a matplotlib.pyplot plot in a Sage Worksheet.
INPUTS:
- svg -- boolean (default: True); if True use an svg; otherwise, use a png.
"""
try:
ext = '.svg' if svg else '.png'
filename = uuid() + ext
matplotlib.pyplot.savefig(filename)
salvus.file(filename)
finally:
try:
os.unlink(filename)
except:
pass
matplotlib.pyplot.show = _show_pyplot
## Our own displayhook
_system_sys_displayhook = sys.displayhook
def displayhook(obj):
if isinstance(obj, (Graphics3d, Graphics, GraphicsArray, matplotlib.figure.Figure, matplotlib.axes.Axes, matplotlib.image.AxesImage, Animation)):
show(obj)
else:
_system_sys_displayhook(obj)
sys.displayhook = displayhook
import sage.misc.latex, types
# We make this a list so that users can append to it easily.
TYPESET_MODE_EXCLUDES = [sage.misc.latex.LatexExpr, types.NoneType,
type, sage.plot.plot3d.base.Graphics3d,
sage.plot.graphics.Graphics,
sage.plot.graphics.GraphicsArray]
def typeset_mode(on=True, display=True, **args):
"""
Turn typeset mode on or off. When on, each output is typeset using LaTeX.
EXAMPLES::
typeset_mode() # turns typesetting on
typeset_mode(False) # turn typesetting off
typeset_mode(True, display=False) # typesetting mode on, but do not make output big and centered
"""
if isinstance(on, (str, unicode)): # e.g., %typeset_mode False
on = sage_eval(on, {'false':False, 'true':True})
if on:
def f(obj):
if isinstance(obj, tuple(TYPESET_MODE_EXCLUDES)):
displayhook(obj)
else:
salvus.tex(obj, display=display)
sys.displayhook = f
else:
sys.displayhook = displayhook
def default_mode(mode):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use default_mode() to return the current mode.
Use default_mode("") to have no default mode.
EXAMPLES::
Make Pari/GP the default mode:
default_mode("gp")
default_mode() # outputs "gp"
Then switch back to Sage::
default_mode("") # or default_mode("sage")
You can also use default_mode as a line decorator::
%default_mode gp # equivalent to default_mode("gp")
"""
return salvus.default_mode(mode)
#######################################################
# Monkey patching and deprecation --
#######################################################
# Monkey patch around a bug in Python's findsource that breaks deprecation in cloud worksheets.
# This won't matter if we switch to not using exec, since then there will be a file behind
# each block of code. However, for now we have to do this.
import inspect
_findsource = inspect.findsource
def findsource(object):
try: return _findsource(object)
except: raise IOError('source code not available') # as *claimed* by the Python docs!
inspect.findsource = findsource
#######################################################
# Viewing pdf's
#######################################################
def show_pdf(filename, viewer="object", width=1000, height=600, scale=1.6):
"""
Display a PDF file from the filesystem in an output cell of a worksheet.
INPUT:
- filename
- viewer -- 'object' (default): use html object tag, which uses the browser plugin, or
provides a download link in case the browser can't display pdf's.
-- 'pdfjs' (experimental): use the pdf.js pure HTML5 viewer, which doesn't require any plugins
(this works on more browsers, but may be slower and uglier)
- width -- (default: 1000) -- pixel width of viewer
- height -- (default: 600) -- pixel height of viewer
- scale -- (default: 1.6) -- zoom scale (only applies to pdfjs)
"""
url = salvus.file(filename, show=False)
if viewer == 'object':
s = '<object data="%s" type="application/pdf" width="%s" height="%s"> Your browser doesn\'t support embedded PDF\'s, but you can <a href="%s">download %s</a></p> </object>'%(url, width, height, url, filename)
salvus.html(s)
elif viewer == 'pdfjs':
import uuid
id = 'a'+str(uuid())
salvus.html('<div id="%s" style="background-color:white; width:%spx; height:%spx; cursor:pointer; overflow:auto;"></div>'%(id, width, height))
salvus.html("""
<!-- pdf.js-based embedded javascript PDF viewer -->
<!-- File from the PDF.JS Library -->
<script type="text/javascript" src="pdfListView/external/compatibility.js"></script>
<script type="text/javascript" src="pdfListView/external/pdf.js"></script>
<!-- to disable webworkers: swap these below -->
<!-- <script type="text/javascript">PDFJS.disableWorker = true;</script> -->
<script type="text/javascript">PDFJS.workerSrc = 'pdfListView/external/pdf.js';</script>
<link rel="stylesheet" href="pdfListView/src/TextLayer.css">
<script src="pdfListView/src/TextLayerBuilder.js"></script>
<link rel="stylesheet" href="pdfListView/src/AnnotationsLayer.css">
<script src="pdfListView/src/AnnotationsLayerBuilder.js"></script>
<script src="pdfListView/src/PdfListView.js"></script>
""")
salvus.javascript('''
var lv = new PDFListView($("#%s")[0], {textLayerBuilder:TextLayerBuilder, annotationsLayerBuilder: AnnotationsLayerBuilder});
lv.setScale(%s);
lv.loadPdf("%s")'''%(
id, scale, url))
else:
raise RuntimeError("viewer must be 'object' or 'pdfjs'")
########################################################
# WebRTC Support
########################################################
def sage_chat(chatroom=None, height="258px"):
if chatroom is None:
from random import randint
chatroom = randint(0,1e24)
html("""
<iframe src="/static/webrtc/group_chat_cell.html?%s" height="%s" width="100%%"></iframe>
"""%(chatroom, height), hide=False)
########################################################
# Documentation of magics
########################################################
def magics(dummy=None):
"""
Type %magics to print all SageMathCloud magic commands or
magics() to get a list of them.
To use a magic command, either type
%command <a line of code>
or
%command
[rest of cell]
Create your own magic command by defining a function that takes
a string as input and outputs a string. (Yes, it is that simple.)
"""
import re
magic_cmds = set()
for s in open(os.path.realpath(__file__), 'r').xreadlines():
s = s.strip()
if s.startswith('%'):
magic_cmds.add(re.findall(r'%[a-zA-Z]+', s)[0])
magic_cmds.discard('%s')
for k,v in sage.interfaces.all.__dict__.iteritems():
if isinstance(v, sage.interfaces.expect.Expect):
magic_cmds.add('%'+k)
magic_cmds.update(['%cython', '%time', '%magics', '%auto', '%hide', '%hideall',
'%fork', '%runfile', '%default_mode', '%typeset_mode'])
v = list(sorted(magic_cmds))
if dummy is None:
return v
else:
for s in v:
print(s)
########################################################
# Go magic
########################################################
def go(s):
"""
Run a go program. For example,
%go
func main() { fmt.Println("Hello World") }
You can set the whole worksheet to be in go mode by typing
%default_mode go
NOTES:
- The official Go tutorial as a long Sage Worksheet is available here:
https://github.com/sagemath/cloud-examples/tree/master/go
- There is no relation between one cell and the next. Each is a separate
self-contained go program, which gets compiled and run, with the only
side effects being changes to the filesystem. The program itself is
stored in a random file that is deleted after it is run.
- The %go command automatically adds 'package main' and 'import "fmt"'
(if fmt. is used) to the top of the program, since the assumption
is that you're using %go interactively.
"""
import uuid
name = str(uuid.uuid4())
if 'fmt.' in s and '"fmt"' not in s and "'fmt'" not in s:
s = 'import "fmt"\n' + s
if 'package main' not in s:
s = 'package main\n' + s
try:
open(name +'.go','w').write(s.encode("UTF-8"))
(child_stdin, child_stdout, child_stderr) = os.popen3('go build %s.go'%name)
err = child_stderr.read()
sys.stdout.write(child_stdout.read())
sys.stderr.write(err)
sys.stdout.flush()
sys.stderr.flush()
if not os.path.exists(name): # failed to produce executable
return
(child_stdin, child_stdout, child_stderr) = os.popen3("./" + name)
sys.stdout.write(child_stdout.read())
sys.stderr.write(child_stderr.read())
sys.stdout.flush()
sys.stderr.flush()
finally:
try:
os.unlink(name+'.go')
except:
pass
try:
os.unlink(name)
except:
pass
# Julia pexepect interface support
import julia
import sage.interfaces
sage.interfaces.julia = julia # the module
julia = julia.julia # specific instance
sage.interfaces.all.julia = julia
# Help command
import sage.misc.sagedoc
import sage.version
def help(*args, **kwds):
if len(args) > 0 or len(kwds) > 0:
sage.misc.sagedoc.help(*args, **kwds)
else:
s = """
## Welcome to Sage %s!
- **Online documentation:** [View the Sage documentation online](http://www.sagemath.org/doc/).
- **Help:** For help on any object or function, for example `matrix_plot`, enter `matrix_plot?` followed by tab or shift+enter. For help on any module (or object or function), for example, `sage.matrix`, enter `help(sage.matrix)`.
- **Tab completion:** Type `obj` followed by tab to see all completions of obj. To see all methods you may call on `obj`, type `obj.` followed by tab.
- **Source code:** Enter `matrix_plot??` followed by tab or shift+enter to look at the source code of `matrix_plot`.
- **License information:** For license information about Sage and its components, enter `license()`."""%sage.version.version
salvus.md(s)
| gpl-3.0 |
cngo-github/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
where d is a floating point representation of date, as returned by date2num
if adjusted=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
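# A minimal sketch of the CSV layout parse_yahoo_historical expects. The quote values
# below are invented and the helper is defined but never called here.
def _parse_yahoo_historical_sketch():
    from StringIO import StringIO
    fake = StringIO(
        "Date,Open,High,Low,Close,Volume,Adj Close\n"
        "2008-01-03,10.0,11.0,9.5,10.5,1000,10.5\n"
        "2008-01-02,9.0,10.2,8.9,10.0,1500,10.0\n")
    # returns a Bunch with date, open, close, high, low, volume arrays
    return parse_yahoo_historical(fake, asobject=True, adjusted=False)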
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
        # 'url' is only defined inside fetch_historical_yahoo, so report the
        # exception itself here
        warnings.warn('urlopen() failure\n' + str(exc))
return None
return ret
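# Usage sketch for the Yahoo helpers above. Ticker symbols and dates are
# arbitrary examples and fetching requires network access:
#
#   import datetime
#   d1 = datetime.date(2004, 1, 1)
#   d2 = datetime.date(2004, 12, 1)
#   quotes = quotes_historical_yahoo('INTC', d1, d2)        # list of tuples
#   sp = quotes_historical_yahoo('^GSPC', d1, d2, asobject=True)
#   # sp.date, sp.open, sp.close, sp.high, sp.low, sp.volume are arrays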
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
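# Minimal sketch of plot_day_summary, assuming 'quotes' was obtained from
# quotes_historical_yahoo above:
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   plot_day_summary(ax, quotes, ticksize=3)
#   ax.xaxis_date()          # interpret the float x values as dates
#   plt.show()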
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
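# Sketch of candlestick, again assuming daily 'quotes' from
# quotes_historical_yahoo (width is then a fraction of a day) and 'plt'
# imported as in the previous sketch:
#
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   lines, patches = candlestick(ax, quotes, width=0.6,
#                                colorup='g', colordown='r', alpha=0.8)
#   ax.xaxis_date()
#   fig.autofmt_xdate()
#   plt.show()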
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
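# Sketch of the array-based variants. plot_day_summary2 and candlestick2
# take parallel sequences indexed by bar number instead of (date, ...)
# tuples, with -1 marking a missing bar; the values below are illustrative:
#
#   opens  = [10, 11, -1, 12]
#   closes = [11, 10, -1, 13]
#   highs  = [12, 12, -1, 14]
#   lows   = [ 9, 10, -1, 11]
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   candlestick2(ax, opens, closes, highs, lows, width=4)
#   plt.show()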
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
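# Sketch of a combined price/volume figure, assuming 'quotes' contains
# (d, open, close, high, low, volume) tuples as produced by
# quotes_historical_yahoo:
#
#   fig = plt.figure()
#   ax1 = fig.add_subplot(211)
#   ax2 = fig.add_subplot(212, sharex=ax1)
#   candlestick(ax1, quotes, width=0.6)
#   volume_overlay3(ax2, quotes)
#   ax1.xaxis_date()
#   plt.show()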
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
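# Sketch of index_bar: vals is any sequence of bar heights with -1 marking
# missing entries; the values below are illustrative:
#
#   vals = [3, 5, -1, 2, 7]
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   index_bar(ax, vals, facecolor='b', width=4)
#   plt.show()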
| agpl-3.0 |
kuchi/imageio | docs/ext/docscrape_sphinx.py | 9 | 7751 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # NOTE: the early return below intentionally suppresses signature
        # rendering; the if/else that follows is unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
            # NOTE: the 'False and' guard deliberately disables the
            # autosummary branch.
            if False and autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
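# Minimal sketch of get_doc_object; numpy.trapz is just an example of an
# object with a numpydoc-style docstring:
#
#   import numpy as np
#   doc = get_doc_object(np.trapz, config={'use_plots': False})
#   print(str(doc))      # reStructuredText rendering of the docstring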
| bsd-2-clause |
alexblaessle/PyFRAP | examples/geometryAndMeshing/sliceBL.py | 2 | 2384 | """Example for PyFRAP on how generate BL meshes around a ROI.
Draws mesh and saves it if filename is given.
USAGE: python sliceBL.py outputFilePath
"""
#Import necessary modules
from pyfrp.subclasses import pyfrp_embryo
from pyfrp.modules import pyfrp_misc_module
from pyfrp.modules import pyfrp_gmsh_IO_module
from pyfrp.modules.pyfrp_term_module import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import csv
import sys
import os
#--------------------------------------------------------
# Define parameters
#--------------------------------------------------------
# Imaging depth
sliceDepth=36.
sliceWidth=5.
# Resolution
dataResMu=566.79
dataResPx=512
# Define bleached square (assuming its centered)
sidelength=2*73.71
#Try to make rim 100px wide
#rimFactor=0.8
rimFactor=1.-101./316.74
# Define cone geometry
cylinderHeight=90.3332804037
coneUpperRadius=635.3/2.
coneLowerRadius=448.5/2.
# Slice
center=[249.00918273645539, 263.82920110192839]
imagingRadius=216.742098118
#--------------------------------------------------------
# Embryo setup
#--------------------------------------------------------
# Create embryo
emb=pyfrp_embryo.embryo("Test")
# Set experimental details
emb.setDataResMu(dataResMu)
emb.setSliceDepthMu(sliceDepth)
emb.offsetBleachedPx=[center[0]-sidelength/2.,center[1]-sidelength/2.]
# Geometry
emb.setGeometry2Cone(center,coneUpperRadius,coneLowerRadius,cylinderHeight)
# Update geometry properties in geo-file
emb.geometry.updateGeoFile()
# Create default ROIs
emb.genDefaultROIs(emb.geometry.getCenter(),imagingRadius,rimFactor=rimFactor,sliceHeightPx=-sliceDepth)
emb.newAnalysis()
# Add simulation and mesh
sim=emb.newSimulation()
# Generate original mesh
sim.mesh.genMesh()
# Read geo and add circle
dGeo=emb.geometry.readGeoFile()
v,a,l,sf=dGeo.addCircleByParameters(center,imagingRadius,-sliceDepth,35.,genSurface=True)
# Add BLF
blf=dGeo.addBoundaryLayerField(hfar=35.,hwall_n=10.,hwall_t=10.,thickness=15.,Quads=0.)
blf.setAsBkgdField()
sf.addToBoundaryLayer(boundField=blf)
# Update geometry
fnOut=emb.geometry.fnGeo.replace(".geo","_sliceBL.geo")
dGeo.writeToFile(fnOut)
emb.geometry.setFnGeo(fnOut)
# Generate new mesh
sim.mesh.genMesh()
# Show mesh
sim.mesh.plotMesh()
# Save mesh
if len(sys.argv)>1:
sim.mesh.saveMeshToImg(sys.argv[1])
raw_input("Press to quit")
| gpl-3.0 |
ndingwall/scikit-learn | sklearn/impute/tests/test_impute.py | 10 | 52174 | from __future__ import division
import pytest
import numpy as np
from scipy import sparse
from scipy.stats import kstest
import io
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
# make IterativeImputer available
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.datasets import load_diabetes
from sklearn.impute import MissingIndicator
from sklearn.impute import SimpleImputer, IterativeImputer
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_union
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import _sparse_random_matrix
from sklearn.exceptions import ConvergenceWarning
from sklearn.impute._base import _most_frequent
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"sparse = {0}" % (strategy, missing_values)
assert_ae = assert_array_equal
if X.dtype.kind == 'f' or X_true.dtype.kind == 'f':
assert_ae = assert_array_almost_equal
# Normal matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
X_trans = imputer.fit(X).transform(X.copy())
assert_ae(imputer.statistics_, statistics,
err_msg=err_msg.format(False))
assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
# Sparse matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_ae(imputer.statistics_, statistics,
err_msg=err_msg.format(True))
assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
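# Quick illustrative sketch of the SimpleImputer API these tests exercise
# (the values are made up):
#
#   imp = SimpleImputer(missing_values=np.nan, strategy="mean")
#   X = np.array([[1.0, 2.0], [np.nan, 4.0], [7.0, np.nan]])
#   imp.fit_transform(X)
#   # the column means (4.0 and 3.0) replace the NaNs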
@pytest.mark.parametrize("strategy",
['mean', 'median', 'most_frequent', "constant"])
def test_imputation_shape(strategy):
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
imputer = SimpleImputer(strategy=strategy)
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert X_imputed.shape == (10, 2)
X_imputed = imputer.fit_transform(X)
assert X_imputed.shape == (10, 2)
iterative_imputer = IterativeImputer(initial_strategy=strategy)
X_imputed = iterative_imputer.fit_transform(X)
assert X_imputed.shape == (10, 2)
@pytest.mark.parametrize("strategy", ["const", 101, None])
def test_imputation_error_invalid_strategy(strategy):
X = np.ones((3, 5))
X[0, 0] = np.nan
with pytest.raises(ValueError, match=str(strategy)):
imputer = SimpleImputer(strategy=strategy)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_imputation_deletion_warning(strategy):
X = np.ones((3, 5))
X[:, 0] = np.nan
with pytest.warns(UserWarning, match="Deleting"):
imputer = SimpleImputer(strategy=strategy, verbose=True)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median",
"most_frequent", "constant"])
def test_imputation_error_sparse_0(strategy):
# check that error are raised when missing_values = 0 and input is sparse
X = np.ones((3, 5))
X[0] = 0
X = sparse.csc_matrix(X)
imputer = SimpleImputer(strategy=strategy, missing_values=0)
with pytest.raises(ValueError, match="Provide a dense array"):
imputer.fit(X)
imputer.fit(X.toarray())
with pytest.raises(ValueError, match="Provide a dense array"):
imputer.transform(X)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
("median", np.nan,
lambda z, v, p: safe_median(np.hstack((z, v))))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
# XXX unreached code as of v0.22
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, np.nan)
@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("dtype", [None, object, str])
def test_imputation_mean_median_error_invalid_type(strategy, dtype):
X = np.array([["a", "b", 3],
[4, "e", 6],
["g", "h", 9]], dtype=dtype)
msg = "non-numeric data:\ncould not convert string to float: '"
with pytest.raises(ValueError, match=msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("type", ['list', 'dataframe'])
def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):
X = [["a", "b", 3],
[4, "e", 6],
["g", "h", 9]]
if type == 'dataframe':
pd = pytest.importorskip("pandas")
X = pd.DataFrame(X)
msg = "non-numeric data:\ncould not convert string to float: '"
with pytest.raises(ValueError, match=msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
@pytest.mark.parametrize("dtype", [str, np.dtype('U'), np.dtype('S')])
def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
# Test imputation on non-numeric data using "most_frequent" and "constant"
# strategy
X = np.array([
[np.nan, np.nan, "a", "f"],
[np.nan, "c", np.nan, "d"],
[np.nan, "b", "d", np.nan],
[np.nan, "c", "d", "h"],
], dtype=dtype)
err_msg = "SimpleImputer does not support data"
with pytest.raises(ValueError, match=err_msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit(X).transform(X)
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in SimpleImputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, SimpleImputer will need to be
# updated to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_most_frequent_objects(marker):
# Test imputation using the most-frequent strategy.
X = np.array([
[marker, marker, "a", "f"],
[marker, "c", marker, "d"],
[marker, "b", "d", marker],
[marker, "c", "d", "h"],
], dtype=object)
X_true = np.array([
["c", "a", "f"],
["c", "d", "d"],
["b", "d", "d"],
["c", "d", "h"],
], dtype=object)
imputer = SimpleImputer(missing_values=marker,
strategy="most_frequent")
X_trans = imputer.fit(X).transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_most_frequent_pandas(dtype):
# Test imputation using the most frequent strategy on pandas df
pd = pytest.importorskip("pandas")
f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
",i,x,\n"
"a,,y,\n"
"a,j,,\n"
"b,j,x,")
df = pd.read_csv(f, dtype=dtype)
X_true = np.array([
["a", "i", "x"],
["a", "j", "y"],
["a", "j", "x"],
["b", "j", "x"]
], dtype=object)
imputer = SimpleImputer(strategy="most_frequent")
X_trans = imputer.fit_transform(df)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1., np.nan)])
def test_imputation_constant_error_invalid_type(X_data, missing_value):
# Verify that exceptions are raised on invalid fill_value type
X = np.full((3, 5), X_data, dtype=float)
X[0, 0] = missing_value
with pytest.raises(ValueError, match="imputing numerical"):
imputer = SimpleImputer(missing_values=missing_value,
strategy="constant",
fill_value="x")
imputer.fit_transform(X)
def test_imputation_constant_integer():
# Test imputation using the constant strategy on integers
X = np.array([
[-1, 2, 3, -1],
[4, -1, 5, -1],
[6, 7, -1, -1],
[8, 9, 0, -1]
])
X_true = np.array([
[0, 2, 3, 0],
[4, 0, 5, 0],
[6, 7, 0, 0],
[8, 9, 0, 0]
])
imputer = SimpleImputer(missing_values=-1, strategy="constant",
fill_value=0)
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray])
def test_imputation_constant_float(array_constructor):
# Test imputation using the constant strategy on floats
X = np.array([
[np.nan, 1.1, 0, np.nan],
[1.2, np.nan, 1.3, np.nan],
[0, 0, np.nan, np.nan],
[1.4, 1.5, 0, np.nan]
])
X_true = np.array([
[-1, 1.1, 0, -1],
[1.2, -1, 1.3, -1],
[0, 0, -1, -1],
[1.4, 1.5, 0, -1]
])
X = array_constructor(X)
X_true = array_constructor(X_true)
imputer = SimpleImputer(strategy="constant", fill_value=-1)
X_trans = imputer.fit_transform(X)
assert_allclose_dense_sparse(X_trans, X_true)
@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_constant_object(marker):
# Test imputation using the constant strategy on objects
X = np.array([
[marker, "a", "b", marker],
["c", marker, "d", marker],
["e", "f", marker, marker],
["g", "h", "i", marker]
], dtype=object)
X_true = np.array([
["missing", "a", "b", "missing"],
["c", "missing", "d", "missing"],
["e", "f", "missing", "missing"],
["g", "h", "i", "missing"]
], dtype=object)
imputer = SimpleImputer(missing_values=marker, strategy="constant",
fill_value="missing")
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_constant_pandas(dtype):
# Test imputation using the constant strategy on pandas df
pd = pytest.importorskip("pandas")
f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
",i,x,\n"
"a,,y,\n"
"a,j,,\n"
"b,j,x,")
df = pd.read_csv(f, dtype=dtype)
X_true = np.array([
["missing_value", "i", "x", "missing_value"],
["a", "missing_value", "y", "missing_value"],
["a", "j", "missing_value", "missing_value"],
["b", "j", "x", "missing_value"]
], dtype=object)
imputer = SimpleImputer(strategy="constant")
X_trans = imputer.fit_transform(df)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]])
def test_iterative_imputer_one_feature(X):
# check we exit early when there is a single feature
imputer = IterativeImputer().fit(X)
assert imputer.n_iter_ == 0
imputer = IterativeImputer()
imputer.fit([[1], [2]])
assert imputer.n_iter_ == 0
imputer.fit([[1], [np.nan]])
assert imputer.n_iter_ == 0
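# Illustrative sketch of basic IterativeImputer usage (the data are made up):
#
#   imp = IterativeImputer(max_iter=10, random_state=0)
#   X = [[1, 2], [3, 6], [4, 8], [np.nan, 3], [7, np.nan]]
#   imp.fit_transform(X)
#   # missing entries are regressed from the other feature (roughly y = 2x),
#   # so the imputations come out near 1.5 and 14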
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
X = _sparse_random_matrix(100, 100, density=0.10)
missing_values = X.data[0]
pipeline = Pipeline([('imputer',
SimpleImputer(missing_values=missing_values)),
('tree',
tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"]
}
Y = _sparse_random_matrix(100, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_copy():
# Test imputation with copy
X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert not np.all(X == Xt)
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert not np.all(X.data == Xt.data)
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_almost_equal(X, Xt)
# copy=False, sparse csc => no copy
X = X_orig.copy().tocsc()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
copy=False)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_almost_equal(X.data, Xt.data)
# copy=False, sparse csr => copy
X = X_orig.copy()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
copy=False)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert not np.all(X.data == Xt.data)
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
def test_iterative_imputer_zero_iters():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
missing_flag = X == 0
X[missing_flag] = np.nan
imputer = IterativeImputer(max_iter=0)
X_imputed = imputer.fit_transform(X)
# with max_iter=0, only initial imputation is performed
assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
# repeat but force n_iter_ to 0
imputer = IterativeImputer(max_iter=5).fit(X)
# transformed should not be equal to initial imputation
assert not np.all(imputer.transform(X) ==
imputer.initial_imputer_.transform(X))
imputer.n_iter_ = 0
# now they should be equal as only initial imputation is done
assert_allclose(imputer.transform(X),
imputer.initial_imputer_.transform(X))
def test_iterative_imputer_verbose():
rng = np.random.RandomState(0)
n = 100
d = 3
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
imputer.fit(X)
imputer.transform(X)
imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
imputer.fit(X)
imputer.transform(X)
def test_iterative_imputer_all_missing():
n = 100
d = 3
X = np.zeros((n, d))
imputer = IterativeImputer(missing_values=0, max_iter=1)
X_imputed = imputer.fit_transform(X)
assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
@pytest.mark.parametrize(
"imputation_order",
['random', 'roman', 'ascending', 'descending', 'arabic']
)
def test_iterative_imputer_imputation_order(imputation_order):
rng = np.random.RandomState(0)
n = 100
d = 10
max_iter = 2
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
X[:, 0] = 1 # this column should not be discarded by IterativeImputer
imputer = IterativeImputer(missing_values=0,
max_iter=max_iter,
n_nearest_features=5,
sample_posterior=False,
skip_complete=True,
min_value=0,
max_value=1,
verbose=1,
imputation_order=imputation_order,
random_state=rng)
imputer.fit_transform(X)
ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]
assert (len(ordered_idx) // imputer.n_iter_ ==
imputer.n_features_with_missing_)
if imputation_order == 'roman':
assert np.all(ordered_idx[:d-1] == np.arange(1, d))
elif imputation_order == 'arabic':
assert np.all(ordered_idx[:d-1] == np.arange(d-1, 0, -1))
elif imputation_order == 'random':
ordered_idx_round_1 = ordered_idx[:d-1]
ordered_idx_round_2 = ordered_idx[d-1:]
assert ordered_idx_round_1 != ordered_idx_round_2
elif 'ending' in imputation_order:
assert len(ordered_idx) == max_iter * (d - 1)
@pytest.mark.parametrize(
"estimator",
[None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
)
def test_iterative_imputer_estimators(estimator):
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
imputer = IterativeImputer(missing_values=0,
max_iter=1,
estimator=estimator,
random_state=rng)
imputer.fit_transform(X)
# check that types are correct for estimators
hashes = []
for triplet in imputer.imputation_sequence_:
expected_type = (type(estimator) if estimator is not None
else type(BayesianRidge()))
assert isinstance(triplet.estimator, expected_type)
hashes.append(id(triplet.estimator))
# check that each estimator is unique
assert len(set(hashes)) == len(hashes)
def test_iterative_imputer_clip():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10,
random_state=rng).toarray()
imputer = IterativeImputer(missing_values=0,
max_iter=1,
min_value=0.1,
max_value=0.2,
random_state=rng)
Xt = imputer.fit_transform(X)
assert_allclose(np.min(Xt[X == 0]), 0.1)
assert_allclose(np.max(Xt[X == 0]), 0.2)
assert_allclose(Xt[X != 0], X[X != 0])
def test_iterative_imputer_clip_truncnorm():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
X[:, 0] = 1
imputer = IterativeImputer(missing_values=0,
max_iter=2,
n_nearest_features=5,
sample_posterior=True,
min_value=0.1,
max_value=0.2,
verbose=1,
imputation_order='random',
random_state=rng)
Xt = imputer.fit_transform(X)
assert_allclose(np.min(Xt[X == 0]), 0.1)
assert_allclose(np.max(Xt[X == 0]), 0.2)
assert_allclose(Xt[X != 0], X[X != 0])
def test_iterative_imputer_truncated_normal_posterior():
# test that the values that are imputed using `sample_posterior=True`
# with boundaries (`min_value` and `max_value` are not None) are drawn
# from a distribution that looks gaussian via the Kolmogorov Smirnov test.
# note that starting from the wrong random seed will make this test fail
# because random sampling doesn't occur at all when the imputation
# is outside of the (min_value, max_value) range
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
X[0][0] = np.nan
imputer = IterativeImputer(min_value=0,
max_value=0.5,
sample_posterior=True,
random_state=rng)
imputer.fit_transform(X)
# generate multiple imputations for the single missing value
imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])
assert all(imputations >= 0)
assert all(imputations <= 0.5)
mu, sigma = imputations.mean(), imputations.std()
    # avoid dividing by a zero standard deviation when normalising
    if sigma == 0:
        sigma += 1e-12
    ks_statistic, p_value = kstest((imputations - mu) / sigma, 'norm')
# we want to fail to reject null hypothesis
# null hypothesis: distributions are the same
    assert ks_statistic < 0.2 or p_value > 0.1, \
        "The posterior does not appear to be normal"
@pytest.mark.parametrize(
"strategy",
["mean", "median", "most_frequent"]
)
def test_iterative_imputer_missing_at_transform(strategy):
rng = np.random.RandomState(0)
n = 100
d = 10
X_train = rng.randint(low=0, high=3, size=(n, d))
X_test = rng.randint(low=0, high=3, size=(n, d))
X_train[:, 0] = 1 # definitely no missing values in 0th column
X_test[0, 0] = 0 # definitely missing value in 0th column
imputer = IterativeImputer(missing_values=0,
max_iter=1,
initial_strategy=strategy,
random_state=rng).fit(X_train)
initial_imputer = SimpleImputer(missing_values=0,
strategy=strategy).fit(X_train)
# if there were no missing values at time of fit, then imputer will
# only use the initial imputer for that feature at transform
assert_allclose(imputer.transform(X_test)[:, 0],
initial_imputer.transform(X_test)[:, 0])
def test_iterative_imputer_transform_stochasticity():
rng1 = np.random.RandomState(0)
rng2 = np.random.RandomState(1)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10,
random_state=rng1).toarray()
# when sample_posterior=True, two transforms shouldn't be equal
imputer = IterativeImputer(missing_values=0,
max_iter=1,
sample_posterior=True,
random_state=rng1)
imputer.fit(X)
X_fitted_1 = imputer.transform(X)
X_fitted_2 = imputer.transform(X)
# sufficient to assert that the means are not the same
assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))
# when sample_posterior=False, and n_nearest_features=None
# and imputation_order is not random
# the two transforms should be identical even if rng are different
imputer1 = IterativeImputer(missing_values=0,
max_iter=1,
sample_posterior=False,
n_nearest_features=None,
imputation_order='ascending',
random_state=rng1)
imputer2 = IterativeImputer(missing_values=0,
max_iter=1,
sample_posterior=False,
n_nearest_features=None,
imputation_order='ascending',
random_state=rng2)
imputer1.fit(X)
imputer2.fit(X)
X_fitted_1a = imputer1.transform(X)
X_fitted_1b = imputer1.transform(X)
X_fitted_2 = imputer2.transform(X)
assert_allclose(X_fitted_1a, X_fitted_1b)
assert_allclose(X_fitted_1a, X_fitted_2)
def test_iterative_imputer_no_missing():
rng = np.random.RandomState(0)
X = rng.rand(100, 100)
X[:, 0] = np.nan
m1 = IterativeImputer(max_iter=10, random_state=rng)
m2 = IterativeImputer(max_iter=10, random_state=rng)
pred1 = m1.fit(X).transform(X)
pred2 = m2.fit_transform(X)
# should exclude the first column entirely
assert_allclose(X[:, 1:], pred1)
# fit and fit_transform should both be identical
assert_allclose(pred1, pred2)
def test_iterative_imputer_rank_one():
rng = np.random.RandomState(0)
d = 50
A = rng.rand(d, 1)
B = rng.rand(1, d)
X = np.dot(A, B)
nan_mask = rng.rand(d, d) < 0.5
X_missing = X.copy()
X_missing[nan_mask] = np.nan
imputer = IterativeImputer(max_iter=5,
verbose=1,
random_state=rng)
X_filled = imputer.fit_transform(X_missing)
assert_allclose(X_filled, X, atol=0.02)
@pytest.mark.parametrize(
"rank",
[3, 5]
)
def test_iterative_imputer_transform_recovery(rank):
rng = np.random.RandomState(0)
n = 70
d = 70
A = rng.rand(n, rank)
B = rng.rand(rank, d)
X_filled = np.dot(A, B)
nan_mask = rng.rand(n, d) < 0.5
X_missing = X_filled.copy()
X_missing[nan_mask] = np.nan
# split up data in half
n = n // 2
X_train = X_missing[:n]
X_test_filled = X_filled[n:]
X_test = X_missing[n:]
imputer = IterativeImputer(max_iter=5,
imputation_order='descending',
verbose=1,
random_state=rng).fit(X_train)
X_test_est = imputer.transform(X_test)
assert_allclose(X_test_filled, X_test_est, atol=0.1)
def test_iterative_imputer_additive_matrix():
rng = np.random.RandomState(0)
n = 100
d = 10
A = rng.randn(n, d)
B = rng.randn(n, d)
X_filled = np.zeros(A.shape)
for i in range(d):
for j in range(d):
X_filled[:, (i+j) % d] += (A[:, i] + B[:, j]) / 2
# a quarter is randomly missing
nan_mask = rng.rand(n, d) < 0.25
X_missing = X_filled.copy()
X_missing[nan_mask] = np.nan
# split up data
n = n // 2
X_train = X_missing[:n]
X_test_filled = X_filled[n:]
X_test = X_missing[n:]
imputer = IterativeImputer(max_iter=10,
verbose=1,
random_state=rng).fit(X_train)
X_test_est = imputer.transform(X_test)
assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)
@pytest.mark.parametrize("max_iter, tol, error_type, warning", [
(-1, 1e-3, ValueError, 'should be a positive integer'),
(1, -1e-3, ValueError, 'should be a non-negative float')
])
def test_iterative_imputer_error_param(max_iter, tol, error_type, warning):
X = np.zeros((100, 2))
imputer = IterativeImputer(max_iter=max_iter, tol=tol)
with pytest.raises(error_type, match=warning):
imputer.fit_transform(X)
def test_iterative_imputer_early_stopping():
rng = np.random.RandomState(0)
n = 50
d = 5
A = rng.rand(n, 1)
B = rng.rand(1, d)
X = np.dot(A, B)
nan_mask = rng.rand(n, d) < 0.5
X_missing = X.copy()
X_missing[nan_mask] = np.nan
imputer = IterativeImputer(max_iter=100,
tol=1e-2,
sample_posterior=False,
verbose=1,
random_state=rng)
X_filled_100 = imputer.fit_transform(X_missing)
assert len(imputer.imputation_sequence_) == d * imputer.n_iter_
imputer = IterativeImputer(max_iter=imputer.n_iter_,
sample_posterior=False,
verbose=1,
random_state=rng)
X_filled_early = imputer.fit_transform(X_missing)
assert_allclose(X_filled_100, X_filled_early, atol=1e-7)
imputer = IterativeImputer(max_iter=100,
tol=0,
sample_posterior=False,
verbose=1,
random_state=rng)
imputer.fit(X_missing)
assert imputer.n_iter_ == imputer.max_iter
def test_iterative_imputer_catch_warning():
# check that we catch a RuntimeWarning due to a division by zero when a
# feature is constant in the dataset
X, y = load_diabetes(return_X_y=True)
n_samples, n_features = X.shape
# simulate that a feature only contain one category during fit
X[:, 3] = 1
# add some missing values
rng = np.random.RandomState(0)
missing_rate = 0.15
for feat in range(n_features):
sample_idx = rng.choice(
np.arange(n_samples), size=int(n_samples * missing_rate),
replace=False
)
X[sample_idx, feat] = np.nan
imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
with pytest.warns(None) as record:
X_fill = imputer.fit_transform(X, y)
assert not record.list
assert not np.any(np.isnan(X_fill))
@pytest.mark.parametrize(
"min_value, max_value, correct_output",
[(0, 100, np.array([[0] * 3, [100] * 3])),
(None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),
(-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),
([-5, -np.inf, 10], [100, 200, np.inf],
np.array([[-5, -np.inf, 10], [100, 200, np.inf]]))],
ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"])
def test_iterative_imputer_min_max_array_like(min_value,
max_value,
correct_output):
# check that passing scalar or array-like
# for min_value and max_value in IterativeImputer works
X = np.random.RandomState(0).randn(10, 3)
imputer = IterativeImputer(min_value=min_value, max_value=max_value)
imputer.fit(X)
assert (isinstance(imputer._min_value, np.ndarray) and
isinstance(imputer._max_value, np.ndarray))
assert ((imputer._min_value.shape[0] == X.shape[1]) and
(imputer._max_value.shape[0] == X.shape[1]))
assert_allclose(correct_output[0, :], imputer._min_value)
assert_allclose(correct_output[1, :], imputer._max_value)
@pytest.mark.parametrize(
"min_value, max_value, err_msg",
[(100, 0, "min_value >= max_value."),
(np.inf, -np.inf, "min_value >= max_value."),
([-5, 5], [100, 200, 0], "_value' should be of shape")])
def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):
# check that passing scalar or array-like
# for min_value and max_value in IterativeImputer works
X = np.random.random((10, 3))
imputer = IterativeImputer(min_value=min_value, max_value=max_value)
with pytest.raises(ValueError, match=err_msg):
imputer.fit(X)
@pytest.mark.parametrize(
"min_max_1, min_max_2",
[([None, None], [-np.inf, np.inf]),
([-10, 10], [[-10] * 4, [10] * 4])],
ids=["None-vs-inf", "Scalar-vs-vector"])
def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):
# Test that None/inf and scalar/vector give the same imputation
X_train = np.array([
[np.nan, 2, 2, 1],
[10, np.nan, np.nan, 7],
[3, 1, np.nan, 1],
[np.nan, 4, 2, np.nan]])
X_test = np.array([
[np.nan, 2, np.nan, 5],
[2, 4, np.nan, np.nan],
[np.nan, 1, 10, 1]])
imputer1 = IterativeImputer(min_value=min_max_1[0],
max_value=min_max_1[1],
random_state=0)
imputer2 = IterativeImputer(min_value=min_max_2[0],
max_value=min_max_2[1],
random_state=0)
X_test_imputed1 = imputer1.fit(X_train).transform(X_test)
X_test_imputed2 = imputer2.fit(X_train).transform(X_test)
assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])
@pytest.mark.parametrize(
"skip_complete", [True, False]
)
def test_iterative_imputer_skip_non_missing(skip_complete):
# check the imputing strategy when missing data are present in the
# testing set only.
# taken from: https://github.com/scikit-learn/scikit-learn/issues/14383
rng = np.random.RandomState(0)
X_train = np.array([
[5, 2, 2, 1],
[10, 1, 2, 7],
[3, 1, 1, 1],
[8, 4, 2, 2]
])
X_test = np.array([
[np.nan, 2, 4, 5],
[np.nan, 4, 1, 2],
[np.nan, 1, 10, 1]
])
imputer = IterativeImputer(
initial_strategy='mean', skip_complete=skip_complete, random_state=rng
)
X_test_est = imputer.fit(X_train).transform(X_test)
if skip_complete:
# impute with the initial strategy: 'mean'
assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))
else:
assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)
@pytest.mark.parametrize(
"rs_imputer",
[None, 1, np.random.RandomState(seed=1)]
)
@pytest.mark.parametrize(
"rs_estimator",
[None, 1, np.random.RandomState(seed=1)]
)
def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):
class ZeroEstimator:
def __init__(self, random_state):
self.random_state = random_state
        def fit(self, *args, **kwargs):
return self
def predict(self, X):
return np.zeros(X.shape[0])
estimator = ZeroEstimator(random_state=rs_estimator)
imputer = IterativeImputer(random_state=rs_imputer)
X_train = np.zeros((10, 3))
imputer.fit(X_train)
assert estimator.random_state == rs_estimator
@pytest.mark.parametrize(
"X_fit, X_trans, params, msg_err",
[(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, -1]]),
{'features': 'missing-only', 'sparse': 'auto'},
'have missing values in transform but have no missing values in fit'),
(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]),
{'features': 'random', 'sparse': 'auto'},
"'features' has to be either 'missing-only' or 'all'"),
(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]),
{'features': 'all', 'sparse': 'random'},
"'sparse' has to be a boolean or 'auto'"),
(np.array([['a', 'b'], ['c', 'a']], dtype=str),
np.array([['a', 'b'], ['c', 'a']], dtype=str),
{}, "MissingIndicator does not support data with dtype")]
)
def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
indicator = MissingIndicator(missing_values=-1)
indicator.set_params(**params)
with pytest.raises(ValueError, match=msg_err):
indicator.fit(X_fit).transform(X_trans)
@pytest.mark.parametrize(
"missing_values, dtype, arr_type",
[(np.nan, np.float64, np.array),
(0, np.int32, np.array),
(-1, np.int32, np.array),
(np.nan, np.float64, sparse.csc_matrix),
(-1, np.int32, sparse.csc_matrix),
(np.nan, np.float64, sparse.csr_matrix),
(-1, np.int32, sparse.csr_matrix),
(np.nan, np.float64, sparse.coo_matrix),
(-1, np.int32, sparse.coo_matrix),
(np.nan, np.float64, sparse.lil_matrix),
(-1, np.int32, sparse.lil_matrix),
(np.nan, np.float64, sparse.bsr_matrix),
(-1, np.int32, sparse.bsr_matrix)
])
@pytest.mark.parametrize(
"param_features, n_features, features_indices",
[('missing-only', 3, np.array([0, 1, 2])),
('all', 3, np.array([0, 1, 2]))])
def test_missing_indicator_new(missing_values, arr_type, dtype, param_features,
n_features, features_indices):
X_fit = np.array([[missing_values, missing_values, 1],
[4, 2, missing_values]])
X_trans = np.array([[missing_values, missing_values, 1],
[4, 12, 10]])
X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])
# convert the input to the right array format and right dtype
X_fit = arr_type(X_fit).astype(dtype)
X_trans = arr_type(X_trans).astype(dtype)
X_fit_expected = X_fit_expected.astype(dtype)
X_trans_expected = X_trans_expected.astype(dtype)
indicator = MissingIndicator(missing_values=missing_values,
features=param_features,
sparse=False)
X_fit_mask = indicator.fit_transform(X_fit)
X_trans_mask = indicator.transform(X_trans)
assert X_fit_mask.shape[1] == n_features
assert X_trans_mask.shape[1] == n_features
assert_array_equal(indicator.features_, features_indices)
assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])
assert X_fit_mask.dtype == bool
assert X_trans_mask.dtype == bool
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
indicator.set_params(sparse=True)
X_fit_mask_sparse = indicator.fit_transform(X_fit)
X_trans_mask_sparse = indicator.transform(X_trans)
assert X_fit_mask_sparse.dtype == bool
assert X_trans_mask_sparse.dtype == bool
assert X_fit_mask_sparse.format == 'csc'
assert X_trans_mask_sparse.format == 'csc'
assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)
@pytest.mark.parametrize(
"arr_type",
[sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix,
sparse.lil_matrix, sparse.bsr_matrix])
def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
# test for sparse input and missing_value == 0
missing_values = 0
X_fit = np.array([[missing_values, missing_values, 1],
[4, missing_values, 2]])
X_trans = np.array([[missing_values, missing_values, 1],
[4, 12, 10]])
# convert the input to the right array format
X_fit_sparse = arr_type(X_fit)
X_trans_sparse = arr_type(X_trans)
indicator = MissingIndicator(missing_values=missing_values)
with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
indicator.fit_transform(X_fit_sparse)
indicator.fit_transform(X_fit)
with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
indicator.transform(X_trans_sparse)
@pytest.mark.parametrize("param_sparse", [True, False, 'auto'])
@pytest.mark.parametrize("missing_values, arr_type",
[(np.nan, np.array),
(0, np.array),
(np.nan, sparse.csc_matrix),
(np.nan, sparse.csr_matrix),
(np.nan, sparse.coo_matrix),
(np.nan, sparse.lil_matrix)
])
def test_missing_indicator_sparse_param(arr_type, missing_values,
param_sparse):
# check the format of the output with different sparse parameter
X_fit = np.array([[missing_values, missing_values, 1],
[4, missing_values, 2]])
X_trans = np.array([[missing_values, missing_values, 1],
[4, 12, 10]])
X_fit = arr_type(X_fit).astype(np.float64)
X_trans = arr_type(X_trans).astype(np.float64)
indicator = MissingIndicator(missing_values=missing_values,
sparse=param_sparse)
X_fit_mask = indicator.fit_transform(X_fit)
X_trans_mask = indicator.transform(X_trans)
if param_sparse is True:
assert X_fit_mask.format == 'csc'
assert X_trans_mask.format == 'csc'
elif param_sparse == 'auto' and missing_values == 0:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
elif param_sparse is False:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
else:
if sparse.issparse(X_fit):
assert X_fit_mask.format == 'csc'
assert X_trans_mask.format == 'csc'
else:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
def test_missing_indicator_string():
X = np.array([['a', 'b', 'c'], ['b', 'c', 'a']], dtype=object)
indicator = MissingIndicator(missing_values='a', features='all')
X_trans = indicator.fit_transform(X)
assert_array_equal(X_trans, np.array([[True, False, False],
[False, False, True]]))
@pytest.mark.parametrize(
"X, missing_values, X_trans_exp",
[(np.array([['a', 'b'], ['b', 'a']], dtype=object), 'a',
np.array([['b', 'b', True, False], ['b', 'b', False, True]],
dtype=object)),
(np.array([[np.nan, 1.], [1., np.nan]]), np.nan,
np.array([[1., 1., True, False], [1., 1., False, True]])),
(np.array([[np.nan, 'b'], ['b', np.nan]], dtype=object), np.nan,
np.array([['b', 'b', True, False], ['b', 'b', False, True]],
dtype=object)),
(np.array([[None, 'b'], ['b', None]], dtype=object), None,
np.array([['b', 'b', True, False], ['b', 'b', False, True]],
dtype=object))]
)
def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
trans = make_union(
SimpleImputer(missing_values=missing_values, strategy='most_frequent'),
MissingIndicator(missing_values=missing_values)
)
X_trans = trans.fit_transform(X)
assert_array_equal(X_trans, X_trans_exp)
@pytest.mark.parametrize("imputer_constructor",
[SimpleImputer, IterativeImputer])
@pytest.mark.parametrize(
"imputer_missing_values, missing_value, err_msg",
[("NaN", np.nan, "Input contains NaN"),
("-1", -1, "types are expected to be both numerical.")])
def test_inconsistent_dtype_X_missing_values(imputer_constructor,
imputer_missing_values,
missing_value,
err_msg):
# regression test for issue #11390. Comparison between incoherent dtype
# for X and missing_values was not raising a proper error.
rng = np.random.RandomState(42)
X = rng.randn(10, 10)
X[0, 0] = missing_value
imputer = imputer_constructor(missing_values=imputer_missing_values)
with pytest.raises(ValueError, match=err_msg):
imputer.fit_transform(X)
def test_missing_indicator_no_missing():
# check that all features are dropped if there are no missing values when
# features='missing-only' (#13491)
X = np.array([[1, 1],
[1, 1]])
mi = MissingIndicator(features='missing-only', missing_values=-1)
Xt = mi.fit_transform(X)
assert Xt.shape[1] == 0
def test_missing_indicator_sparse_no_explicit_zeros():
    # Check that non-missing values don't become explicit zeros in the mask
# generated by missing indicator when X is sparse. (#13491)
X = sparse.csr_matrix([[0, 1, 2],
[1, 2, 0],
[2, 0, 1]])
mi = MissingIndicator(features='all', missing_values=1)
Xt = mi.fit_transform(X)
assert Xt.getnnz() == Xt.sum()
@pytest.mark.parametrize("imputer_constructor",
[SimpleImputer, IterativeImputer])
def test_imputer_without_indicator(imputer_constructor):
X = np.array([[1, 1],
[1, 1]])
imputer = imputer_constructor()
imputer.fit(X)
assert imputer.indicator_ is None
@pytest.mark.parametrize(
"arr_type",
[
sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix,
sparse.lil_matrix, sparse.bsr_matrix
]
)
def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
X_sparse = arr_type([
[np.nan, 1, 5],
[2, np.nan, 1],
[6, 3, np.nan],
[1, 2, 9]
])
X_true = np.array([
[3., 1., 5., 1., 0., 0.],
[2., 2., 1., 0., 1., 0.],
[6., 3., 5., 0., 0., 1.],
[1., 2., 9., 0., 0., 0.],
])
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
X_trans = imputer.fit_transform(X_sparse)
assert sparse.issparse(X_trans)
assert X_trans.shape == X_true.shape
assert_allclose(X_trans.toarray(), X_true)
@pytest.mark.parametrize(
'strategy, expected',
[('most_frequent', 'b'), ('constant', 'missing_value')]
)
def test_simple_imputation_string_list(strategy, expected):
X = [['a', 'b'],
['c', np.nan]]
X_true = np.array([
['a', 'b'],
['c', expected]
], dtype=object)
imputer = SimpleImputer(strategy=strategy)
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize(
"order, idx_order",
[
("ascending", [3, 4, 2, 0, 1]),
("descending", [1, 0, 2, 4, 3])
]
)
def test_imputation_order(order, idx_order):
# regression test for #15393
rng = np.random.RandomState(42)
X = rng.rand(100, 5)
X[:50, 1] = np.nan
X[:30, 0] = np.nan
X[:20, 2] = np.nan
X[:10, 4] = np.nan
with pytest.warns(ConvergenceWarning):
trs = IterativeImputer(max_iter=1,
imputation_order=order,
random_state=0).fit(X)
idx = [x.feat_idx for x in trs.imputation_sequence_]
assert idx == idx_order
@pytest.mark.parametrize("missing_value", [-1, np.nan])
def test_simple_imputation_inverse_transform(missing_value):
# Test inverse_transform feature for np.nan
X_1 = np.array([
[9, missing_value, 3, -1],
[4, -1, 5, 4],
[6, 7, missing_value, -1],
[8, 9, 0, missing_value]
])
X_2 = np.array([
[5, 4, 2, 1],
[2, 1, missing_value, 3],
[9, missing_value, 7, 1],
[6, 4, 2, missing_value]
])
X_3 = np.array([
[1, missing_value, 5, 9],
[missing_value, 4, missing_value, missing_value],
[2, missing_value, 7, missing_value],
[missing_value, 3, missing_value, 8]
])
X_4 = np.array([
[1, 1, 1, 3],
[missing_value, 2, missing_value, 1],
[2, 3, 3, 4],
[missing_value, 4, missing_value, 2]
])
imputer = SimpleImputer(missing_values=missing_value, strategy='mean',
add_indicator=True)
X_1_trans = imputer.fit_transform(X_1)
X_1_inv_trans = imputer.inverse_transform(X_1_trans)
X_2_trans = imputer.transform(X_2) # test on new data
X_2_inv_trans = imputer.inverse_transform(X_2_trans)
assert_array_equal(X_1_inv_trans, X_1)
assert_array_equal(X_2_inv_trans, X_2)
for X in [X_3, X_4]:
X_trans = imputer.fit_transform(X)
X_inv_trans = imputer.inverse_transform(X_trans)
assert_array_equal(X_inv_trans, X)
@pytest.mark.parametrize("missing_value", [-1, np.nan])
def test_simple_imputation_inverse_transform_exceptions(missing_value):
X_1 = np.array([
[9, missing_value, 3, -1],
[4, -1, 5, 4],
[6, 7, missing_value, -1],
[8, 9, 0, missing_value]
])
imputer = SimpleImputer(missing_values=missing_value, strategy="mean")
X_1_trans = imputer.fit_transform(X_1)
with pytest.raises(ValueError,
match=f"Got 'add_indicator={imputer.add_indicator}'"):
imputer.inverse_transform(X_1_trans)
@pytest.mark.parametrize(
"expected,array,dtype,extra_value,n_repeat",
[
# array of object dtype
("extra_value", ['a', 'b', 'c'], object, "extra_value", 2),
(
"most_frequent_value",
['most_frequent_value', 'most_frequent_value', 'value'],
object, "extra_value", 1
),
("a", ['min_value', 'min_value' 'value'], object, "a", 2),
("min_value", ['min_value', 'min_value', 'value'], object, "z", 2),
# array of numeric dtype
(10, [1, 2, 3], int, 10, 2),
(1, [1, 1, 2], int, 10, 1),
(10, [20, 20, 1], int, 10, 2),
(1, [1, 1, 20], int, 10, 2),
]
)
def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
assert expected == _most_frequent(
np.array(array, dtype=dtype), extra_value, n_repeat
)
| bsd-3-clause |
ldirer/scikit-learn | examples/datasets/plot_iris_dataset.py | 36 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
lengths, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The plot below uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
chongyangma/python-machine-learning-book | code/optional-py-scripts/ch12.py | 4 | 33098 | # Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 12 - Training Artificial Neural Networks for Image Recognition
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import os
import struct
import numpy as np
from scipy.special import expit
import sys
import matplotlib.pyplot as plt
#############################################################################
print(50 * '=')
print('Obtaining the MNIST dataset')
print(50 * '-')
s = """
The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/
and consists of the following four parts:
- Training set images: train-images-idx3-ubyte.gz
(9.9 MB, 47 MB unzipped, 60,000 samples)
- Training set labels: train-labels-idx1-ubyte.gz
(29 KB, 60 KB unzipped, 60,000 labels)
- Test set images: t10k-images-idx3-ubyte.gz
(1.6 MB, 7.8 MB, 10,000 samples)
- Test set labels: t10k-labels-idx1-ubyte.gz
(5 KB, 10 KB unzipped, 10,000 labels)
In this section, we will only be working with a subset of MNIST, thus,
we only need to download the training set images and training set labels.
After downloading the files, I recommend decompressing them using
the Unix/Linux gzip tool from
the terminal for efficiency, e.g., using the command
gzip *ubyte.gz -d
in your local MNIST download directory, or use your
favorite unzipping tool if you are working with a machine
running Microsoft Windows. The images are stored in byte form,
and using the following function, we will read them into NumPy arrays
that we will use to train our MLP.
"""
print(s)
_ = input("Please hit enter to continue.")
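# Optional helper (not from the original book code): decompress the downloaded
# MNIST archives from Python if the gzip command-line tool is unavailable.
# It assumes the *.gz files sit in a local './mnist' directory and does
# nothing when no such archives are found.
def gunzip_mnist_archives(path='mnist'):
    """Decompress any *-ubyte.gz archives in `path` next to the originals."""
    import glob
    import gzip
    import shutil
    for gz_path in glob.glob(os.path.join(path, '*-ubyte.gz')):
        out_path = gz_path[:-3]  # strip the '.gz' suffix
        if not os.path.exists(out_path):
            with gzip.open(gz_path, 'rb') as f_in, \
                    open(out_path, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)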
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte' % kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
X_train, y_train = load_mnist('mnist', kind='train')
print('Training rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Test rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_all.png', dpi=300)
plt.show()
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = X_train[y_train == 7][i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_7.png', dpi=300)
plt.show()
"""
Uncomment the following lines to optionally save the data in CSV format.
However, note that those CSV files will take up a
substantial amount of storage space:
- train_img.csv 1.1 GB (gigabytes)
- train_labels.csv 1.4 MB (megabytes)
- test_img.csv 187.0 MB
- test_labels 144 KB (kilobytes)
"""
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
# y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')
# X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
# y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')
#############################################################################
print(50 * '=')
print('Implementing a multi-layer perceptron')
print(50 * '-')
class NeuralNetMLP(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent cycles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn = NeuralNetMLP(n_output=10,
n_features=X_train.shape[1],
n_hidden=50,
l2=0.1,
l1=0.0,
epochs=1000,
eta=0.001,
alpha=0.001,
decrease_const=0.00001,
minibatches=50,
shuffle=True,
random_state=1)
nn.fit(X_train, y_train, print_progress=True)
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
# plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
# plt.tight_layout()
# plt.savefig('./figures/cost2.png', dpi=300)
plt.show()
y_train_pred = nn.predict(X_train)
if sys.version_info < (3, 0):
acc = ((np.sum(y_train == y_train_pred, axis=0)).astype('float') /
X_train.shape[0])
else:
acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
y_test_pred = nn.predict(X_test)
if sys.version_info < (3, 0):
acc = ((np.sum(y_test == y_test_pred, axis=0)).astype('float') /
X_test.shape[0])
else:
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (acc * 100))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab = y_test_pred[y_test != y_test_pred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_miscl.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Debugging neural networks with gradient checking')
print(50 * '-')
class MLPGradientCheck(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent cycles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def _gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):
""" Apply gradient checking (for debugging only)
Returns
---------
relative_error : float
Relative error between the numerically
approximated gradients and the backpropagated gradients.
"""
num_grad1 = np.zeros(np.shape(w1))
epsilon_ary1 = np.zeros(np.shape(w1))
for i in range(w1.shape[0]):
for j in range(w1.shape[1]):
epsilon_ary1[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 - epsilon_ary1, w2)
cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 + epsilon_ary1, w2)
cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)
num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary1[i, j] = 0
num_grad2 = np.zeros(np.shape(w2))
epsilon_ary2 = np.zeros(np.shape(w2))
for i in range(w2.shape[0]):
for j in range(w2.shape[1]):
epsilon_ary2[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 - epsilon_ary2)
cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 + epsilon_ary2)
cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)
num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary2[i, j] = 0
num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))
grad = np.hstack((grad1.flatten(), grad2.flatten()))
norm1 = np.linalg.norm(num_grad - grad)
norm2 = np.linalg.norm(num_grad)
norm3 = np.linalg.norm(grad)
relative_error = norm1 / (norm2 + norm3)
return relative_error
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
# start gradient checking
grad_diff = self._gradient_checking(X=X_data[idx],
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2,
epsilon=1e-5,
grad1=grad1,
grad2=grad2)
if grad_diff <= 1e-7:
print('Ok: %s' % grad_diff)
elif grad_diff <= 1e-4:
print('Warning: %s' % grad_diff)
else:
print('PROBLEM: %s' % grad_diff)
# update weights; [alpha * delta_w_prev] for momentum learning
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn_check = MLPGradientCheck(n_output=10,
n_features=X_train.shape[1],
n_hidden=10,
l2=0.0,
l1=0.0,
epochs=10,
eta=0.001,
alpha=0.0,
decrease_const=0.0,
minibatches=1,
shuffle=False,
random_state=1)
nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
| mit |
elijah513/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first fitting a dictionary with online
:ref:`DictionaryLearning` and then reconstructing with various transform
methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
wasade/qiita | setup.py | 1 | 3898 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.0.1-dev"
classes = """
Development Status :: 2 - Pre-Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
long_description = """Qiita: Spot Patterns"""
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='qiita-spots',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita: Spot Patterns',
author="Qiita development team",
author_email="[email protected]",
url='https://github.com/biocore/qiita',
test_suite='nose.collector',
packages=['qiita_core',
'qiita_db',
'qiita_pet',
'qiita_pet/handlers',
'qiita_ware'
],
package_data={'qiita_core': ['support_files/config_test.txt'],
'qiita_db': [
'support_files/*.sql',
'support_files/patches/*.sql',
'support_files/patches/python_patches/*.py',
'support_files/test_data/preprocessed_data/*',
'support_files/test_data/processed_data/*',
'support_files/test_data/raw_data/*',
'support_files/test_data/analysis/*',
'support_files/test_data/reference/*',
'support_files/test_data/job/*.txt',
'support_files/test_data/job/2_test_folder/*',
'support_files/test_data/uploads/1/a_folder/*.txt',
'support_files/test_data/uploads/1/.hidden_file.txt',
'support_files/test_data/uploads/1/uploaded_file.txt',
'support_files/test_data/templates/*',
'support_files/work_data/*'],
'qiita_pet': [
'static/css/*.css', 'static/img/*.png',
'static/img/*.gif', 'static/img/*.ico',
'static/js/*.js', 'static/vendor/css/*.css',
'static/vendor/css/images/*.png',
'static/vendor/fonts/glyphicons*.*',
'static/vendor/images/*.png',
'static/vendor/js/*.js',
'results/admin/jobname/*.html',
'templates/*.html']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8", 'mock'],
'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
install_requires=['psycopg2', 'click == 1.0', 'future==0.13.0',
'bcrypt', 'pandas >= 0.15', 'numpy >= 1.7',
'tornado==3.1.1', 'toredis', 'redis',
'ipython[all] >= 2.4.1, < 2.5', 'pyparsing',
'h5py >= 2.3.1', 'biom-format', 'natsort', 'networkx',
'scikit-bio >= 0.2.3, < 0.3.0', 'wtforms == 2.0.1',
'qiime >= 1.9.0, < 1.10.0'],
classifiers=classifiers
)
| bsd-3-clause |
google/timesketch | api_client/python/timesketch_api_client/client.py | 1 | 20819 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client."""
from __future__ import unicode_literals
import os
import logging
# pylint: disable=wrong-import-order
import bs4
import requests
# pylint: disable=redefined-builtin
from requests.exceptions import ConnectionError
import webbrowser
# pylint: disable-msg=import-error
from google_auth_oauthlib import flow as googleauth_flow
import google.auth.transport.requests
import pandas
from . import credentials
from . import definitions
from . import error
from . import index
from . import sketch
from . import user
from . import version
from . import sigma
logger = logging.getLogger('timesketch_api.client')
class TimesketchApi:
"""Timesketch API object
Attributes:
api_root: The full URL to the server API endpoint.
session: Authenticated HTTP session.
"""
DEFAULT_OAUTH_SCOPE = [
'https://www.googleapis.com/auth/userinfo.email',
'openid',
'https://www.googleapis.com/auth/userinfo.profile'
]
DEFAULT_OAUTH_AUTH_URL = 'https://accounts.google.com/o/oauth2/v2/auth'
DEFAULT_OAUTH_TOKEN_URL = 'https://oauth2.googleapis.com/token'
DEFAULT_OAUTH_PROVIDER_URL = 'https://www.googleapis.com/oauth2/v1/certs'
DEFAULT_OAUTH_OOB_URL = 'urn:ietf:wg:oauth:2.0:oob'
DEFAULT_OAUTH_API_CALLBACK = '/login/api_callback/'
# Default retry count for operations that attempt a retry.
DEFAULT_RETRY_COUNT = 5
def __init__(self,
host_uri,
username,
password='',
verify=True,
client_id='',
client_secret='',
auth_mode='userpass',
create_session=True):
"""Initializes the TimesketchApi object.
Args:
host_uri: URI to the Timesketch server (https://<server>/).
username: User username.
password: User password.
verify: Verify server SSL certificate.
client_id: The client ID if OAUTH auth is used.
client_secret: The OAUTH client secret if OAUTH is used.
auth_mode: The authentication mode to use. Defaults to 'userpass'
Supported values are 'userpass' (username/password combo),
'http-basic' (HTTP Basic authentication) and oauth.
create_session: Boolean indicating whether the client object
should create a session object. If set to False the
function "set_session" needs to be called before proceeding.
Raises:
ConnectionError: If the Timesketch server is unreachable.
RuntimeError: If the client is unable to authenticate to the
backend.
"""
self._host_uri = host_uri
self.api_root = '{0:s}/api/v1'.format(host_uri)
self.credentials = None
self._flow = None
if not create_session:
self.session = None
return
try:
self.session = self._create_session(
username, password, verify=verify, client_id=client_id,
client_secret=client_secret, auth_mode=auth_mode)
except ConnectionError as exc:
raise ConnectionError('Timesketch server unreachable') from exc
except RuntimeError as e:
raise RuntimeError(
'Unable to connect to server, error: {0!s}'.format(e)) from e
@property
def current_user(self):
"""Property that returns the user object of the logged in user."""
return user.User(self)
@property
def version(self):
"""Property that returns back the API client version."""
version_dict = self.fetch_resource_data('version/')
ts_version = None
if version_dict:
ts_version = version_dict.get('meta', {}).get('version')
if ts_version:
return 'API Client: {0:s}\nTS Backend: {1:s}'.format(
version.get_version(), ts_version)
return 'API Client: {0:s}'.format(version.get_version())
def set_credentials(self, credential_object):
"""Sets the credential object."""
self.credentials = credential_object
def set_session(self, session_object):
"""Sets the session object."""
self.session = session_object
def _authenticate_session(self, session, username, password):
"""Post username/password to authenticate the HTTP session.
Args:
session: Instance of requests.Session.
username: User username.
password: User password.
"""
# Do a POST to the login handler to set up the session cookies
data = {'username': username, 'password': password}
session.post('{0:s}/login/'.format(self._host_uri), data=data)
def _set_csrf_token(self, session):
"""Retrieve CSRF token from the server and append to HTTP headers.
Args:
session: Instance of requests.Session.
"""
# Scrape the CSRF token from the response
response = session.get(self._host_uri)
soup = bs4.BeautifulSoup(response.text, features='html.parser')
tag = soup.find(id='csrf_token')
csrf_token = None
if tag:
csrf_token = tag.get('value')
else:
tag = soup.find('meta', attrs={'name': 'csrf-token'})
if tag:
csrf_token = tag.attrs.get('content')
if not csrf_token:
return
session.headers.update({
'x-csrftoken': csrf_token,
'referer': self._host_uri
})
def _create_oauth_session(
self, client_id='', client_secret='', client_secrets_file=None,
run_server=True, skip_open=False):
"""Return an OAuth session.
Args:
client_id: The client ID if OAUTH auth is used.
client_secret: The OAUTH client secret if OAUTH is used.
client_secrets_file: Path to the JSON file that contains the client
secrets, in the client_secrets format.
run_server: A boolean, if set to true (default) a web server is
run to catch the OAUTH request and response.
skip_open: A boolean, if set to True (defaults to False) an
authorization URL is printed on the screen to visit. This is
only valid if run_server is set to False.
Return:
session: Instance of requests.Session.
Raises:
RuntimeError: if unable to log in to the application.
"""
if client_secrets_file:
if not os.path.isfile(client_secrets_file):
raise RuntimeError(
'Unable to log in, client secret files does not exist.')
flow = googleauth_flow.InstalledAppFlow.from_client_secrets_file(
client_secrets_file, scopes=self.DEFAULT_OAUTH_SCOPE,
autogenerate_code_verifier=True)
else:
provider_url = self.DEFAULT_OAUTH_PROVIDER_URL
client_config = {
'installed': {
'client_id': client_id,
'client_secret': client_secret,
'auth_uri': self.DEFAULT_OAUTH_AUTH_URL,
'token_uri': self.DEFAULT_OAUTH_TOKEN_URL,
'auth_provider_x509_cert_url': provider_url,
'redirect_uris': [self.DEFAULT_OAUTH_OOB_URL],
},
}
flow = googleauth_flow.InstalledAppFlow.from_client_config(
client_config, self.DEFAULT_OAUTH_SCOPE,
autogenerate_code_verifier=True)
flow.redirect_uri = self.DEFAULT_OAUTH_OOB_URL
if run_server:
_ = flow.run_local_server()
else:
auth_url, _ = flow.authorization_url(prompt='select_account')
if skip_open:
print('Visit the following URL to authenticate: {0:s}'.format(
auth_url))
else:
open_browser = input('Open the URL in a browser window? [y/N] ')
if open_browser.lower() == 'y' or open_browser.lower() == 'yes':
webbrowser.open(auth_url)
else:
print(
'Need to manually visit URL to authenticate: '
'{0:s}'.format(auth_url))
code = input('Enter the token code: ')
_ = flow.fetch_token(code=code)
session = flow.authorized_session()
self._flow = flow
self.credentials = credentials.TimesketchOAuthCredentials()
self.credentials.credential = flow.credentials
return self.authenticate_oauth_session(session)
def authenticate_oauth_session(self, session):
"""Authenticate an OAUTH session.
Args:
session: Authorized session object.
"""
# Authenticate to the Timesketch backend.
login_callback_url = '{0:s}{1:s}'.format(
self._host_uri, self.DEFAULT_OAUTH_API_CALLBACK)
params = {
'id_token': session.credentials.id_token,
}
response = session.get(login_callback_url, params=params)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Unable to authenticate', error=RuntimeError)
self._set_csrf_token(session)
return session
def _create_session(
self, username, password, verify, client_id, client_secret,
auth_mode):
"""Create authenticated HTTP session for server communication.
Args:
username: User to authenticate as.
password: User password.
verify: Verify server SSL certificate.
client_id: The client ID if OAUTH auth is used.
client_secret: The OAUTH client secret if OAUTH is used.
auth_mode: The authentication mode to use. Supported values are
'userpass' (username/password combo), 'http-basic'
(HTTP Basic authentication) and oauth.
Returns:
Instance of requests.Session.
"""
if auth_mode == 'oauth':
return self._create_oauth_session(client_id, client_secret)
if auth_mode == 'oauth_local':
return self._create_oauth_session(
client_id=client_id, client_secret=client_secret,
run_server=False, skip_open=True)
session = requests.Session()
# If using HTTP Basic auth, add the user/pass to the session
if auth_mode == 'http-basic':
session.auth = (username, password)
# SSL Cert verification is turned on by default.
if not verify:
session.verify = False
# Get and set CSRF token and authenticate the session if appropriate.
self._set_csrf_token(session)
if auth_mode == 'userpass':
self._authenticate_session(session, username, password)
return session
def fetch_resource_data(self, resource_uri, params=None):
"""Make a HTTP GET request.
Args:
resource_uri: The URI to the resource to be fetched.
params: Dict of URL parameters to send in the GET request.
Returns:
Dictionary with the response data.
"""
resource_url = '{0:s}/{1:s}'.format(self.api_root, resource_uri)
response = self.session.get(resource_url, params=params)
return error.get_response_json(response, logger)
def create_sketch(self, name, description=None):
"""Create a new sketch.
Args:
name: Name of the sketch.
description: Description of the sketch.
Returns:
Instance of a Sketch object.
"""
if not description:
description = name
retry_count = 0
objects = None
while True:
resource_url = '{0:s}/sketches/'.format(self.api_root)
form_data = {'name': name, 'description': description}
response = self.session.post(resource_url, json=form_data)
response_dict = error.get_response_json(response, logger)
objects = response_dict.get('objects')
if objects:
break
retry_count += 1
if retry_count >= self.DEFAULT_RETRY_COUNT:
raise RuntimeError('Unable to create a new sketch.')
sketch_id = objects[0]['id']
return self.get_sketch(sketch_id)
def get_oauth_token_status(self):
"""Return a dict with OAuth token status, if one exists."""
if not self.credentials:
return {
'status': 'No stored credentials.'}
return {
'expired': self.credentials.credential.expired,
'expiry_time': self.credentials.credential.expiry.isoformat(),
}
def get_sketch(self, sketch_id):
"""Get a sketch.
Args:
sketch_id: Primary key ID of the sketch.
Returns:
Instance of a Sketch object.
"""
return sketch.Sketch(sketch_id, api=self)
def get_aggregator_info(self, name='', as_pandas=False):
"""Returns information about available aggregators.
Args:
name: String with the name of an aggregator. If the name is not
provided, a list with all aggregators is returned.
as_pandas: Boolean indicating that the results will be returned
as a Pandas DataFrame instead of a list of dicts.
Returns:
A list with dict objects with the information about aggregators,
unless as_pandas is set, then the function returns a DataFrame
object.
"""
resource_uri = 'aggregation/info/'
if name:
data = {'aggregator': name}
resource_url = '{0:s}/{1:s}'.format(self.api_root, resource_uri)
response = self.session.post(resource_url, json=data)
response_json = error.get_response_json(response, logger)
else:
response_json = self.fetch_resource_data(resource_uri)
if not as_pandas:
return response_json
lines = []
if isinstance(response_json, dict):
response_json = [response_json]
for line in response_json:
line_dict = {
'name': line.get('name', 'N/A'),
'description': line.get('description', 'N/A'),
}
for field_index, field in enumerate(line.get('fields', [])):
line_dict['field_{0:d}_name'.format(
field_index + 1)] = field.get('name')
line_dict['field_{0:d}_description'.format(
field_index + 1)] = field.get('description')
lines.append(line_dict)
return pandas.DataFrame(lines)
def list_sketches(self, per_page=50, scope='user', include_archived=True):
"""Get a list of all open sketches that the user has access to.
Args:
per_page: Number of items per page when paginating. Default is 50.
scope: What scope to get sketches as. Default to user.
include_archived: If archived sketches should be returned.
Yields:
Sketch object instances.
"""
url_params = {
'per_page': per_page,
'scope': scope,
'include_archived': include_archived
}
# Start with the first page
page = 1
has_next_page = True
while has_next_page:
url_params['page'] = page
response = self.fetch_resource_data('sketches/', params=url_params)
meta = response.get('meta', {})
page = meta.get('next_page')
if not page:
has_next_page = False
for sketch_dict in response.get('objects', []):
sketch_id = sketch_dict['id']
sketch_name = sketch_dict['name']
sketch_obj = sketch.Sketch(
sketch_id=sketch_id, api=self, sketch_name=sketch_name)
yield sketch_obj
def get_searchindex(self, searchindex_id):
"""Get a searchindex.
Args:
searchindex_id: Primary key ID of the searchindex.
Returns:
Instance of a SearchIndex object.
"""
return index.SearchIndex(searchindex_id, api=self)
def check_celery_status(self, job_id=''):
"""Return information about outstanding celery tasks or a specific one.
Args:
job_id (str): Optional Celery job identification string. If
provided that specific job ID is queried, otherwise
a check for all outstanding jobs is checked.
Returns:
A list of dict objects with the status of the celery task/tasks
that were outstanding.
"""
if job_id:
response = self.fetch_resource_data(
'tasks/?job_id={0:s}'.format(job_id))
else:
response = self.fetch_resource_data('tasks/')
return response.get('objects', [])
def list_searchindices(self):
"""Yields all searchindices that the user has access to.
Yields:
SearchIndex object instances.
"""
response = self.fetch_resource_data('searchindices/')
response_objects = response.get('objects')
if not response_objects:
yield None
return
for index_dict in response_objects[0]:
index_id = index_dict['id']
index_name = index_dict['name']
index_obj = index.SearchIndex(
searchindex_id=index_id, api=self, searchindex_name=index_name)
yield index_obj
def refresh_oauth_token(self):
"""Refresh an OAUTH token if one is defined."""
if not self.credentials:
return
request = google.auth.transport.requests.Request()
self.credentials.credential.refresh(request)
def list_sigma_rules(self, as_pandas=False):
"""Get a list of sigma objects.
Args:
as_pandas: Boolean indicating that the results will be returned
as a Pandas DataFrame instead of a list of dicts.
Returns:
            List of Sigma rule object instances, or a pandas DataFrame with all
            rules if as_pandas is True.
Raises:
ValueError: If no rules are found.
"""
rules = []
response = self.fetch_resource_data('sigma/')
if not response:
raise ValueError('No rules found.')
if as_pandas:
return pandas.DataFrame.from_records(response.get('objects'))
for rule_dict in response['objects']:
if not rule_dict:
raise ValueError('No rules found.')
index_obj = sigma.Sigma(api=self)
for key, value in rule_dict.items():
index_obj.set_value(key, value)
rules.append(index_obj)
return rules
def get_sigma_rule(self, rule_uuid):
"""Get a sigma rule.
Args:
rule_uuid: UUID of the Sigma rule.
Returns:
Instance of a Sigma object.
"""
sigma_obj = sigma.Sigma(api=self)
sigma_obj.from_rule_uuid(rule_uuid)
return sigma_obj
def get_sigma_rule_by_text(self, rule_text):
"""Returns a Sigma Object based on a sigma rule text.
Args:
rule_text: Full Sigma rule text.
Returns:
Instance of a Sigma object.
Raises:
ValueError: No Rule text given or issues parsing it.
"""
if not rule_text:
raise ValueError('No rule text given.')
try:
sigma_obj = sigma.Sigma(api=self)
sigma_obj.from_text(rule_text)
except ValueError:
            logger.error(
                'Parsing Error, unable to parse the Sigma rule', exc_info=True)
return sigma_obj
| apache-2.0 |
6-Degrees/Instagram_data | examples/user_interest_analysis/user_analysis.py | 1 | 9406 | from Instagram_Spider import *
from nltk.stem import WordNetLemmatizer
from nltk.corpus import words
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from matplotlib import pyplot as plt
def store_tag_data(name, tag_data):
    file_name = 'user_tag_data/' + name + '_tag_data.json'
file = open(file_name, 'w')
json.dump(tag_data, file)
file.close()
def load_tag_data(name):
    file_name = 'user_tag_data/' + name + '_tag_data.json'
file = open(file_name, 'r')
tag_data = json.load(file)
file.close()
return tag_data
def get_data(my_spider, name):
    file_name = 'user_tag_data/' + name + '_tag_data.json'
if os.path.isfile(file_name):
tag_data = load_tag_data(name)
else:
tag_data = my_spider.get_all_tag_from_user(name)
store_tag_data(name, tag_data)
return tag_data
def clean_up_string(old_string):
characters = 'QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm'
new_string = ''
for char in old_string:
if char in characters:
new_string += char
return new_string.lower()
def successful_rate(successful_list, fail_list):
successful_number = 0
fail_number = 0
for tag_pair in successful_list:
successful_number += tag_pair[1]
for tag_pair in fail_list:
fail_number += tag_pair[1]
my_rate = successful_number/(successful_number+fail_number)
return my_rate
def store_dictionary(dict_name, dict_data):
file = open(dict_name, 'w')
json.dump(dict_data, file)
file.close()
def load_dictionary(dict_name):
file = open(dict_name, 'r')
dict_data = json.load(file)
file.close()
return dict_data
def display_result(data_dict, confidence, username):
plt.figure(figsize=(9, 9))
labels = ['family', 'sport', 'animal', 'art', 'technology', 'life', 'fashion', 'food', 'travel']
colors = ['green', 'blue', 'cyan', 'purple', 'orange', 'pink', 'seagreen', 'red', 'yellow']
sizes = list()
explode_list = list()
max_label = ''
current_value = 0
total_value = 0
for label in labels:
sizes.append(data_dict[label])
total_value += data_dict[label]
if data_dict[label] > current_value:
current_value = data_dict[label]
max_label = label
for label in labels:
if label == max_label:
explode_list.append(0.1)
else:
explode_list.append(0)
final_sizes = list()
for size in sizes:
final_sizes.append(size/total_value)
explode = tuple(explode_list)
patches, l_text, p_text = plt.pie(final_sizes, explode=explode, labels=labels, colors=colors,
autopct='%3.1f%%', shadow=False, startangle=90, pctdistance=0.7)
    for t in l_text:
        t.set_size(12)  # call the setter instead of overwriting it
    for t in p_text:
        t.set_size(4)
plt.axis('equal')
plt.text(-1.2, 1.1, 'username: ' + username, fontsize=15)
plt.text(-1.2, 1, 'confidence: %.2f%%' % (confidence * 100), fontsize=15)
file_name = 'user_analysis_result/' + username + '_analysis.png'
plt.savefig(file_name, format='png')
plt.show()
def combine_dictionary(official_word_list, dictionary):
official_word_list1 = list(official_word_list)
for category in dictionary:
word_list = dictionary[category]
for word in word_list:
official_word_list1.append(word)
official_word_list2 = set(official_word_list1)
return official_word_list2
def tag2word(tag_list):
result_list = list()
unsolved_list = list()
one_tenth = int(len(tag_list)/10)
current_number = 0
progress = 0
for tag_pair in tag_list:
current_number += 1
if current_number > one_tenth:
progress += 1
current_number = 0
print('finish ' + str(progress) + '0%')
tag = clean_up_string(tag_pair[0]).lower()
tag = clean_up_string(tag)
pos = len(tag)
while pos > 1:
word = wordnet_lemmatizer.lemmatize(tag[0:pos])
if word in wordlist:
result_list.append((word, tag_pair[1]))
tag = tag[pos:]
pos = len(tag)
else:
pos -= 1
if len(tag) > 1:
unsolved_list.append((tag, tag_pair[1]))
print('done...')
return result_list, unsolved_list
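# Illustrative sketch (not part of the original script): how tag2word splits a
# couple of made-up hashtags into dictionary words. It relies on the module-level
# `wordlist` and `wordnet_lemmatizer` defined further below, so it must only be
# called after the module has finished initializing.
def _example_tag2word():
    fake_tags = [('sunsetbeach', 3), ('dogsofinstagram', 5)]
    words_found, unsolved = tag2word(tag_list=fake_tags)
    return words_found, unsolved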
def analyze_words(my_words, dictionary):
similarity_dictionary = dict()
local_similarity_dictionary = dict()
distribution_dictionary = dict()
total_number = 0
valid_word_count = 0
for category in dictionary:
similarity_dictionary[category] = 0
local_similarity_dictionary[category] = 0
distribution_dictionary[category] = list()
distribution_dictionary['unknown'] = list()
one_tenth = int(len(my_words)/10)
current_number = 0
progress = 0
total_words = 0
for word_pair in my_words:
find_category = False
current_number += 1
if current_number > one_tenth:
progress += 1
current_number = 0
print('finish ' + str(progress) + '0%')
for category in dictionary:
if word_pair[0] in dictionary[category]:
valid_word_count += 1
similarity_dictionary[category] += 10 * word_pair[1]
total_number += word_pair[1]
distribution_dictionary[category].append(word_pair)
find_category = True
if find_category:
continue
try:
word = wn.synsets(word_pair[0])[0]
total_number += word_pair[1]
valid_word_count += 1
except:
continue
for category in dictionary:
word_list = dictionary[category]
total_similarity = 0
total_categary_words = 0
for test_word in word_list:
try:
test = wn.synsets(test_word)[0]
except:
continue
try:
total_similarity += word.res_similarity(test, brown_ic)
total_categary_words += 1
except:
continue
if total_categary_words > 0:
similarity_dictionary[category] += word_pair[1] * total_similarity / total_categary_words
local_similarity_dictionary[category] = total_similarity / total_categary_words
final_category = 'others'
for category in local_similarity_dictionary:
if local_similarity_dictionary[category] > local_similarity_dictionary[final_category]:
final_category = category
if local_similarity_dictionary[final_category] > 2.5:
if local_similarity_dictionary[final_category] > 4:
if word_pair[0] not in dictionary[final_category]:
dictionary[final_category].append(word_pair[0])
find_category = True
distribution_dictionary[final_category].append(word_pair)
if not find_category:
distribution_dictionary['unknown'].append(word_pair)
for category in similarity_dictionary:
similarity_dictionary[category] /= total_number
recognition_rate = valid_word_count/len(my_words)
percentage_dictionary = dict()
for category in distribution_dictionary:
percentage_dictionary[category] = 0
for word_pair2 in distribution_dictionary[category]:
percentage_dictionary[category] += word_pair2[1]
total_words += word_pair2[1]
for category in percentage_dictionary:
percentage_dictionary[category] /= total_words
print('done...')
store_dictionary('Instagram_tag_dictionary.json', dictionary)
return similarity_dictionary, recognition_rate, distribution_dictionary, percentage_dictionary
wordlist = set(words.words())
wordnet_lemmatizer = WordNetLemmatizer()
brown_ic = wordnet_ic.ic('ic-brown.dat')
semcor_ic = wordnet_ic.ic('ic-semcor.dat')
sample_media_code = 'BGUNUTcMhvo'
sample_user_name = 'yy_god'
sample_private_user_name = 'sretiqa'
sample_public_user_name = 'silisunglasses'
test_username = 'safacosarpinar'
my_dictionary = load_dictionary('Instagram_tag_dictionary.json')
wordlist = combine_dictionary(wordlist, my_dictionary)
spider = InstagramSpider()
# username = 'hongming0611'
# password = input('hi, ' + username + 'please give me your password: ')
# spider.login(username, password)
data = get_data(spider, test_username)
print('data got...')
print('analyzing tags from user: ' + test_username)
words_from_tags, unsolved_data = tag2word(tag_list=data)
rate1 = successful_rate(successful_list=words_from_tags, fail_list=unsolved_data)
print("successful rate of extracting from hashtag is:%.2f%%" % (rate1 * 100))
print('analyzing words from tags from user: ' + test_username)
result, rate, distribute_result, percentage_result = analyze_words(my_words=words_from_tags, dictionary=my_dictionary)
print("successful rate of fitting words into dictionary is:%.2f%%" % (rate * 100))
print('similarity result: ')
print(result)
print(distribute_result['unknown'])
recognize_rate = 1-percentage_result['unknown']
print("our machine's current recognize rate is:%.2f%%" % (recognize_rate * 100))
display_result(data_dict=percentage_result, confidence=recognize_rate, username=test_username)
print('end')
| gpl-3.0 |
haltaro/predicting-comic-end | comic.py | 1 | 19994 | # -*- coding: utf-8 -*-
import json
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
import urllib.request
from time import sleep
from matplotlib.font_manager import FontProperties
font_path = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'
font_prop = FontProperties(fname=font_path)
sns.set(style='ticks')
def search_magazine(key='JUMPrgl', n_pages=25):
"""
    Retrieves n_pages worth of magazines whose unique ID, magazine issue ID,
    or magazine code contains `key`.
"""
url = 'https://mediaarts-db.bunka.go.jp/mg/api/v1/results_magazines?id=' + \
key + '&page='
magazines = []
for i in range(1, n_pages):
response = urllib.request.urlopen(url + str(i))
content = json.loads(response.read().decode('utf8'))
magazines.extend(content['results'])
return magazines
def extract_data(content):
"""
    Extracts the table-of-contents information contained in `content`.
    - year: publication year
    - no: issue number
    - title: title of the work
    - author: author
    - color: whether the work is in color
    - pages: number of published pages
    - start_page: first page of the work
    - best: placement counted from the front of the magazine
    - worst: placement counted from the back of the magazine
"""
    # Keep only entries that are manga works.
comics = [comic for comic in content['contents']
if comic['category']=='マンガ作品']
data = []
year = int(content['basics']['date_indication'][:4])
    # The issue number is sometimes missing, so exception handling is required.
try:
no = int(content['basics']['number_indication'])
except ValueError:
no = content['basics']['number_indication']
for comic in comics:
        title = comic['work']
if not title:
continue
        # Some works do not list a page count, so exception handling is required.
        # For no particular reason, works without a page count are treated as 10 pages.
try:
pages = int(comic['work_pages'])
except ValueError:
pages = 10
        # To handle works that run several chapters in a single issue (e.g. "Inumarudashi"),
        # a title that is already present in data is not registered as a new datum;
        # instead, only the page count of the existing datum is increased.
if len(data) > 0 and title in [datum['title'] for datum in data]:
data[[datum['title'] for datum in
data].index(title)]['pages'] += pages
else:
data.append({
'year': year,
'no': no,
'title': comic['work'],
'author': comic['author'],
'subtitle': comic['subtitle'],
'color': int('カラー' in comic['note']),
                'pages': pages,  # reuse the value parsed above (handles missing page counts)
'start_pages': int(comic['start_page'])
})
    # To exclude promotional mini-manga, entries with a total of 5 pages or fewer are dropped.
filterd_data = [datum for datum in data if datum['pages'] > 5]
for n, datum in enumerate(filterd_data):
datum['best'] = n + 1
datum['worst'] = len(filterd_data) - n
return filterd_data
def save_data(magazines, offset=0, file_name='data/wj-api.json'):
"""
    For every magazine in `magazines`, fetches the table-of-contents information
    of the issues from `offset` onwards and saves it to `file_name`.
"""
url = 'https://mediaarts-db.bunka.go.jp/mg/api/v1/magazine?id='
    # First line of the file
if offset == 0:
with open(file_name, 'w') as f:
f.write('[\n')
with open(file_name, 'a') as f:
        # Call the web API once for every magazine in magazines.
for m, magazine in enumerate(magazines[offset:]):
response = urllib.request.urlopen(url + str(magazine['id']),
timeout=30)
content = json.loads(response.read().decode('utf8'))
            # Extract the required information with extract_data() defined above.
comics = extract_data(content)
print('{0:4d}/{1}: Extracted data from {2}'.\
format(m + offset, len(magazines), url + str(magazine['id'])))
            # Save the information of each comic in comics to file_name.
for n, comic in enumerate(comics):
                # Except at the very beginning of the file, the first comic of a
                # magazine is preceded by ',\n'.
if m + offset > 0 and n == 0:
f.write(',\n')
json.dump(comic, f, ensure_ascii=False)
                # Append ',\n' after every comic except the last one.
if not n == len(comics) - 1:
f.write(',\n')
print('{0:9}: Saved data to {1}'.format(' ', file_name))
            # Always pause to limit the load on the server.
sleep(3)
    # Last line of the file
with open(file_name, 'a') as f:
f.write(']')
class ComicAnalyzer():
"""漫画雑誌の目次情報を読みだして,管理するクラスです."""
def __init__(self, data_path='data/wj-api.json', min_week=7,
short_week=10):
"""
        On initialization, the table-of-contents information is extracted from
        the .json file at data_path.
        - self.data: list holding all table-of-contents entries
        - self.all_titles: list holding all titles
        - self.serialized_titles: list of all titles serialized for at least min_week weeks
        - self.last_year: year of the most recent table of contents
        - self.last_no: issue number of the most recent table of contents
        - self.end_titles: titles in self.serialized_titles whose serialization
          ended by self.last_year and self.last_no
        - self.short_end_titles: titles in self.end_titles that ended within
          short_week weeks
        - self.long_end_titles: titles in self.end_titles that continued for
          short_week + 1 weeks or more
"""
self.data = self.read_data(data_path)
self.all_titles = self.collect_all_titles()
self.serialized_titles = self.drop_short_titles(
self.all_titles, min_week)
self.last_year = self.find_last_year(self.serialized_titles[-100:])
self.last_no = self.find_last_no(self.serialized_titles[-100:],
self.last_year)
self.end_titles = self.drop_continued_titles(
self.serialized_titles, self.last_year, self.last_no)
self.short_end_titles = self.drop_long_titles(
self.end_titles, short_week)
self.long_end_titles = self.drop_short_titles(
self.end_titles, short_week + 1)
def read_data(self, data_path):
""" data_pathにあるjsonファイルを読み出して,
全ての目次情報をまとめたリストを返します. """
with open(data_path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
def collect_all_titles(self):
""" self.dataから全ての作品名を抽出したリストを返します. """
titles = []
for comic in self.data:
if comic['title'] not in titles:
titles.append(comic['title'])
return titles
def extract_item(self, title='ONE PIECE', item='worst'):
""" self.dataからtitleのitemをすべて抽出したリストを返します. """
return [comic[item] for comic in self.data if comic['title'] == title]
def drop_short_titles(self, titles, min_week):
""" titlesのうち,min_week週以上連載した作品名のリストを返します. """
return [title for title in titles
if len(self.extract_item(title)) >= min_week]
def drop_long_titles(self, titles, max_week):
""" titlesのうち,max_week週以内で終了した作品名のリストを返します. """
return [title for title in titles
if len(self.extract_item(title)) <= max_week]
def find_last_year(self, titles):
""" titlesが掲載された雑誌のうち,最新の年を返します. """
return max([self.extract_item(title, 'year')[-1]
for title in titles])
def find_last_no(self, titles, year):
""" titlesが掲載されたyear年の雑誌のうち,最新の号数を返します. """
return max([self.extract_item(title, 'no')[-1]
for title in titles
if self.extract_item(title, 'year')[-1] == year])
def drop_continued_titles(self, titles, year, no):
""" titlesのうち,year年のno号までに連載が終了した作品名のリストを返します. """
end_titles = []
for title in titles:
last_year = self.extract_item(title, 'year')[-1]
if last_year < year:
end_titles.append(title)
elif last_year == year:
if self.extract_item(title, 'no')[-1] < no:
end_titles.append(title)
return end_titles
def search_title(self, key, titles):
""" titlesのうち,keyを含む作品名のリストを返します. """
return [title for title in titles if key in title]
class ComicNet():
""" マンガ作品が短命か否かを識別する多層パーセプトロンを管理するクラスです.
:param thresh_week:短命作品とそれ以外を分けるしきい値.
:param n_x:多層パーセプトロンに入力する掲載週の数.入力層のノード数.
"""
def __init__(self, thresh_week=20, n_x=7):
self.n_x = n_x
self.thresh_week = thresh_week
def get_x(self, analyzer, title):
"""指定された作品の指定週までの正規化掲載順を取得する関数です."""
worsts = np.array(analyzer.extract_item(title)[:self.n_x])
bests = np.array(analyzer.extract_item(title, 'best')[:self.n_x])
bests_normalized = bests / (worsts + bests - 1)
color = sum(analyzer.extract_item(title, 'color')[:self.n_x]
) /self.n_x
return np.append(bests_normalized, color)
def get_y(self, analyzer, title, thresh_week):
"""指定された作品が,短命作品か否かを取得する関数です."""
return int(len(analyzer.extract_item(title)) <= thresh_week)
def get_xs_ys(self, analyzer, titles, thresh_week):
"""指定された作品群の特徴量とラベルとタイトルを返す関数です.
y==0とy==1のデータ数を揃えて返します.
"""
xs = np.array([self.get_x(analyzer, title) for title in titles])
ys = np.array([[self.get_y(analyzer, title, thresh_week)]
for title in titles])
        # Balance the numbers of ys==0 and ys==1 samples.
idx_ps = np.where(ys.reshape((-1)) == 1)[0]
idx_ng = np.where(ys.reshape((-1)) == 0)[0]
len_data = min(len(idx_ps), len(idx_ng))
x_ps = xs[idx_ps[-len_data:]]
x_ng = xs[idx_ng[-len_data:]]
y_ps = ys[idx_ps[-len_data:]]
y_ng = ys[idx_ng[-len_data:]]
t_ps = [titles[ii] for ii in idx_ps[-len_data:]]
t_ng = [titles[ii] for ii in idx_ng[-len_data:]]
return x_ps, x_ng, y_ps, y_ng, t_ps, t_ng
def augment_x(self, x, n_aug):
"""指定された数のxデータを人為的に生成する関数です."""
if n_aug:
x_pair = np.array(
[[x[idx] for idx in
np.random.choice(range(len(x)), 2, replace=False)]
for _ in range(n_aug)])
weights = np.random.rand(n_aug, 1, self.n_x + 1)
weights = np.concatenate((weights, 1 - weights), axis=1)
x_aug = (x_pair * weights).sum(axis=1)
return np.concatenate((x, x_aug), axis=0)
else:
return x
def augment_y(self, y, n_aug):
"""指定された数のyデータを人為的に生成する関数です."""
if n_aug:
y_aug = np.ones((n_aug, 1)) if y[0, 0] \
else np.zeros((n_aug, 1))
return np.concatenate((y, y_aug), axis=0)
else:
return y
def configure_dataset(self, analyzer, n_drop=0, n_aug=0):
"""データセットを設定する関数です.
:param analyzer: ComicAnalyzerクラスのインスタンス
:param n_drop: trainingデータから除外する古いデータの数
:param n_aug: trainingデータに追加するaugmentedデータの数
"""
x_ps, x_ng, y_ps, y_ng, t_ps, t_ng = self.get_xs_ys(
analyzer, analyzer.end_titles, self.thresh_week)
self.x_test = np.concatenate((x_ps[-50:], x_ng[-50:]), axis=0)
self.y_test = np.concatenate((y_ps[-50:], y_ng[-50:]), axis=0)
self.titles_test = t_ps[-50:] + t_ng[-50:]
self.x_val = np.concatenate((x_ps[-100 : -50],
x_ng[-100 : -50]), axis=0)
self.y_val = np.concatenate((y_ps[-100 : -50],
y_ng[-100 : -50]), axis=0)
self.x_tra = np.concatenate(
(self.augment_x(x_ps[n_drop//2 : -100], n_aug//2),
self.augment_x(x_ng[n_drop//2 : -100], n_aug//2)), axis=0)
self.y_tra = np.concatenate(
(self.augment_y(y_ps[n_drop//2 : -100], n_aug//2),
self.augment_y(y_ng[n_drop//2 : -100], n_aug//2)), axis=0)
def build_graph(self, r=0.001, n_h=7, stddev=0.01):
"""多層パーセプトロンを構築する関数です.
:param r: 学習率
:param n_h: 隠れ層のノード数
:param stddev: 変数の初期分布の標準偏差
"""
tf.reset_default_graph()
        # Input layer and targets
n_y = self.y_test.shape[1]
self.x = tf.placeholder(tf.float32, [None, self.n_x + 1], name='x')
self.y = tf.placeholder(tf.float32, [None, n_y], name='y')
        # Hidden layer (1st)
self.w_h_1 = tf.Variable(
tf.truncated_normal((self.n_x + 1, n_h), stddev=stddev))
self.b_h_1 = tf.Variable(tf.zeros(n_h))
self.logits = tf.add(tf.matmul(self.x, self.w_h_1), self.b_h_1)
self.logits = tf.nn.relu(self.logits)
        # Hidden layer (2nd)
self.w_h_2 = tf.Variable(
tf.truncated_normal((n_h, n_h), stddev=stddev))
self.b_h_2 = tf.Variable(tf.zeros(n_h))
self.logits = tf.add(tf.matmul(self.logits, self.w_h_2), self.b_h_2)
self.logits = tf.nn.relu(self.logits)
        # Output layer
self.w_y = tf.Variable(
tf.truncated_normal((n_h, n_y), stddev=stddev))
self.b_y = tf.Variable(tf.zeros(n_y))
self.logits = tf.add(tf.matmul(self.logits, self.w_y), self.b_y)
tf.summary.histogram('logits', self.logits)
        # Loss function
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.logits, labels=self.y))
tf.summary.scalar('loss', self.loss)
        # Optimization
self.optimizer = tf.train.AdamOptimizer(r).minimize(self.loss)
self.output = tf.nn.sigmoid(self.logits, name='output')
correct_prediction = tf.equal(self.y, tf.round(self.output))
self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
name='acc')
tf.summary.histogram('output', self.output)
tf.summary.scalar('acc', self.acc)
self.merged = tf.summary.merge_all()
def train(self, epoch=2000, print_loss=False, save_log=False,
log_dir='./logs/1', log_name='', save_model=False,
model_name='prediction_model'):
"""多層パーセプトロンを学習させ,ログや学習済みモデルを保存する関数です.
:param epoch: エポック数
:pram print_loss: 損失関数の履歴を出力するか否か
:param save_log: ログを保存するか否か
:param log_dir: ログの保存ディレクトリ
:param log_name: ログの保存名
:param save_model: 学習済みモデルを保存するか否か
:param model_name: 学習済みモデルの保存名
"""
with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())  # initialize variables
            # Settings for saving logs
log_tra = log_dir + '/tra/' + log_name
writer_tra = tf.summary.FileWriter(log_tra)
log_val = log_dir + '/val/' + log_name
writer_val = tf.summary.FileWriter(log_val)
for e in range(epoch):
feed_dict = {self.x: self.x_tra, self.y: self.y_tra}
_, loss_tra, acc_tra, mer_tra = sess.run(
(self.optimizer, self.loss, self.acc, self.merged),
feed_dict=feed_dict)
# validation
feed_dict = {self.x: self.x_val, self.y: self.y_val}
loss_val, acc_val, mer_val = sess.run(
(self.loss, self.acc, self.merged),
feed_dict=feed_dict)
                # Save the logs
if save_log:
writer_tra.add_summary(mer_tra, e)
writer_val.add_summary(mer_val, e)
                # Print the loss
if print_loss and e % 500 == 0:
print('# epoch {}: loss_tra = {}, loss_val = {}'.
format(e, str(loss_tra), str(loss_val)))
            # Save the model
if save_model:
saver = tf.train.Saver()
_ = saver.save(sess, './models/' + model_name)
def test(self, model_name='prediction_model'):
"""指定されたモデルを読み込み,テストする関数です.
:param model_name: 読み込むモデルの名前
"""
tf.reset_default_graph()
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
            # Load the model
loader = tf.train.import_meta_graph(
'./models/{}.meta'.format(model_name))
loader.restore(sess, './models/' + model_name)
x_loaded = loaded_graph.get_tensor_by_name('x:0')
y_loaded = loaded_graph.get_tensor_by_name('y:0')
loss_loaded = loaded_graph.get_tensor_by_name('loss:0')
acc_loaded = loaded_graph.get_tensor_by_name('acc:0')
output_loaded = loaded_graph.get_tensor_by_name('output:0')
# test
feed_dict = {x_loaded: self.x_test, y_loaded: self.y_test}
loss_test, acc_test, output_test = sess.run(
(loss_loaded, acc_loaded, output_loaded), feed_dict=feed_dict)
return acc_test, output_test
| mit |
466152112/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
ddietze/FSRStools | raman/__init__.py | 1 | 67473 | """
.. module: FSRStools.raman
:platform: Windows
.. moduleauthor:: Daniel Dietze <[email protected]>
A collection of functions to process and analyze Raman spectra. These functions work both for spontaneous Raman as well as femtosecond stimulated Raman data. The import functions for spontaneous Raman are optimized for Princeton Instruments' WinSpec ASCII files, while those for FSRS expect the files in the output format of pyFSRS or David Hoffman's LabView FSRS.
**Changelog:**
*10-13-2015:*
- Some bug fixes.
- Added more functionality for analyzing TA data, e.g. automated kinetics stripping.
*01-04-2016:*
- Changed type of padding in FT_baseline_correction.
- Added moving-median based baseline correction (MM_baseline_correction).
- Added support for analysis of shifted-excitation or frequency-modulated FSRS spectra (`reconstruct_FM_spectrum`).
..
This file is part of the FSRStools python module.
The FSRStools python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The FSRStools python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FSRStools python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014, 2015 Daniel Dietze <[email protected]>.
"""
import numpy as np
import pylab as pl
from scipy.optimize import curve_fit, differential_evolution, minimize
from scipy.interpolate import interp1d, RectBivariateSpline
from scipy.integrate import simps
import glob
import sys
import FSRStools.fitting as ft
# -------------------------------------------------------------------------------------------------------------------
# some axis label shortcuts
talabel = "Absorption (OD)"
tamlabel = "Absorption (mOD)"
glabel = "Raman Gain (OD)"
gmlabel = "Raman Gain (mOD)"
rslabel = "Raman Shift (cm$-1$)"
wnlabel = "Wavenumber (cm$-1$)"
# -------------------------------------------------------------------------------------------------------------------
# axis calibration functions
def Raman2WL(lambda0, lines):
"""Convert Raman shift (cm-1) to wavelength (nm) for given pump wavelength lambda0.
:param float lambda0: Pump wavelength (nm).
:param array lines: Raman shifts (cm-1) to convert to wavelengths.
:returns: Array of wavelengths (nm).
"""
return 1e7 / (1e7 / lambda0 - lines)
def WL2Raman(lambda0, lines):
"""Convert wavelength (nm) to Raman shift (cm-1) for given pump wavelength lambda0.
:param float lambda0: Pump wavelength (nm).
:param array lines: Wavelengths (nm) to convert to Raman shifts (cm-1).
:returns: Array of Raman shifts (cm-1).
"""
return 1e7 * ((lines - lambda0) / (lines * lambda0))
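# Illustrative sketch (not part of the original module): round-trip between a
# Raman shift and the corresponding absolute wavelength for an assumed 800 nm
# pump; the two conversion functions above are exact inverses of each other.
def _example_axis_conversion():
    lambda0 = 800.0                                # assumed pump wavelength (nm)
    wl = Raman2WL(lambda0, np.array([801.3]))      # cyclohexane ring-breathing line
    return WL2Raman(lambda0, wl)                   # recovers ~801.3 cm-1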
def get_closest_maximum(x, y, x0):
"""Returns the position and value of the (local) maximum closest to x0.
First, the function gets the local maximum in y closest to x0 by following the gradient. Once the function value does not increase any more, it stops. Second, a parabolic approximation is used on this value and its two neighbours to get a more exact estimate.
.. note:: This function is not noise resistant. In order to get good results, use a x0-value as close as possible to the peak maximum.
:param array x: x-values.
:param array y: y-values / data (same shape as x).
:param float x0: x-position of a point close to the maximum.
:returns: (x', y') Position and value of (local) maximum closest to x0.
"""
i0 = np.argmin(np.absolute(x - x0))
# get closest maximum
if(y[i0 + 1] > y[i0]):
i1 = i0 + 1
while(y[i1] > y[i0]):
i0 = i1
i1 = i0 + 1
else:
i1 = i0 - 1
while(y[i1] > y[i0]):
i0 = i1
i1 = i0 - 1
# now i0 is the index of the maximum
a = (- y[i0 + 1] - 4.0 * x[i0] * y[i0] + 2.0 * x[i0] * y[i0 - 1] + 2.0 * x[i0] * y[i0 + 1] + y[i0 - 1]) / (2.0 * (-2.0 * y[i0] + y[i0 - 1] + y[i0 + 1]))
# b = -y[i0] + y[i0 - 1] / 2.0 + y[i0 + 1] / 2.0
c = -(16.0 * y[i0]**2 - 8 * y[i0] * y[i0 - 1] - 8 * y[i0] * y[i0 + 1] + y[i0 - 1]**2 - 2.0 * y[i0 - 1] * y[i0 + 1] + y[i0 + 1]**2) / (8.0 * (-2.0 * y[i0] + y[i0 - 1] + y[i0 + 1]))
return [a, c]
# interpolate data y on wavenumber axis wn
# returns new wavenumber axis and data interpolated data with equidistant sampling points
def interpolate(x, y, N=0, kind='linear'):
"""Convenience wrapper around SciPy's :py:func:`~scipy.interpolate.interp1d` function.
:param array x: x-axis with non-equidistant sampling points
:param array y: data sampled on x (same shape as x)
:param int N: Number of *additional* points that should be added per interval (default = 0).
:param str kind: Type of interpolation to be used (default = 'linear'). This parameter is directly passed to :py:func:`~scipy.interpolate.interp1d`.
:returns: (x', y') x- and y-arrays sampled equidistantly.
"""
if(x[-1] < x[0]):
x = np.flipud(x)
y = np.flipud(y)
x1 = np.linspace(x[0], x[-1], (len(x) - 1) * (N + 1) + 1)
y1 = interp1d(x, y, kind)(x1)
return [x1, y1]
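# Illustrative sketch with synthetic data: resample a spectrum recorded on a
# non-equidistant axis onto an equidistant grid with two extra points per interval.
def _example_interpolate():
    x = np.array([0.0, 1.0, 2.5, 4.0, 6.0])        # non-equidistant sampling points
    y = x**2                                       # synthetic data
    x1, y1 = interpolate(x, y, N=2, kind='linear')
    return x1, y1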
# get vibrational frequencies of common used standards for calibration
# selection of lines is adapted for the red (blue) table
# if sorted is true, return in order of largest peak to smallest peak
def mol_lines(mol="chex", window=(600, 2000), sorted = False):
"""Returns a list of vibrational frequencies for commonly used standards.
:param str mol: Identifier of solvent. Currently supported solvents are:
- Cyclohexane (`chex`, default)
- Benzene (`benzene`)
- Methanol (`meoh`)
- Isopropanol / isopropyl alcohol (`iso`)
- Chloroform (`chloroform`)
:param tuple window: Select the wavenumber interval of interest in cm-1 (default = (600, 2000)).
:param bool sorted: If True, return sorted by amplitude (highest first); otherwise by wavenumber (default).
:returns: Array of Stokes shifts in cm-1.
"""
# a list of solvent lines; [[wavenumber], [amplitude]]
spectra = {'chex': [[384.1, 426.3, 801.3, 1028.3, 1157.6, 1266.4, 1444.4, 2664.4, 2852.9, 2923.8, 2938.3], [2, 3, 95, 15, 6, 14, 12, 8, 100, 58, 67]],
'benzene': [[605.6, 848.9, 991.6, 1178, 1326, 1595, 3046.8, 3061.9], [2.2, 1.0, 11.7, 2.7, 0.1, 4.5, 8.1, 18.0]],
'meoh': [[1037, 1453, 2835, 2945], [48, 18, 96, 71]],
'iso': [[820, 955, 1132, 1454, 2881, 2919, 2938, 2972], [95, 19, 10, 18, 45, 46, 44, 41]],
'chloroform': [[3178.8, 685.7, 366.7, 1261.8, 781.6, 263.7], [46.7, 14.3, 6.31, 3.58, 9.32, 4.44]]}
# check first whether solvent exists
if mol not in spectra.keys():
print("ERROR: Solvent not found! Choose one of", spectra.keys())
return np.array([])
# select proper wavenumber window
lines = np.array(spectra[mol])
lines = np.compress((lines[0, :] >= window[0]) & (lines[0, :] <= window[1]), lines, axis=1)
# return properly sorted array
if sorted:
return np.array(lines[0])[np.flipud(np.argsort(lines[1]))]
else:
return np.array(lines[0])[np.argsort(lines[0])]
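# Illustrative sketch: fetch the strongest cyclohexane lines in the fingerprint
# region, sorted by amplitude, as they would be used for calibration below.
def _example_mol_lines():
    return mol_lines('chex', window=(600, 1600), sorted=True)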
def calibrate(y, lambda0, peaks, mol, show=False):
"""Returns a calibrated x-axis in wavenumbers using a calibration spectrum and a list of vibrational modes.
:param array y: Calibration spectrum.
:param float lambda0: Pump wavelength (nm).
:param array peaks: List of estimated peak positions in indices.
:param array mol: List of **associated** vibrational frequencies (cm-1). Does not have to have same shape as `peaks`. If shape is different, use the first `min(len(peaks), len(mol))` entries.
:param bool show: If True, show a plot of pixel index vs wavelength, False by default.
:returns: Wavenumber axis with same shape as y (non-equidistantly sampled).
.. seealso:: If the wavenumber axis should be sampled equidistantly, use :py:func:`interpolate`.
Example::
import FSRStools.raman as fs
lmbda0 = 795.6 # pump wavelength
showCal = True # display calibration results
# load the calibration spectrum
chex, _, _ = loadFSRS("chex")
# generate wavenumber axis
wn = fs.calibrate(chex, lmbda0, fs.find_peaks(chex, sorted=True), fs.mol_lines('chex', sorted=True), show=showCal)
"""
mappx = np.array([])
mapwl = np.array([])
# get a list of calibration points (px vs wl)
line = lambda x, a, m: a * x + m
x = np.arange(y.size)
N = min(len(peaks), len(mol))
for i in range(N):
x1, y1 = get_closest_maximum(x, y, peaks[i])
mappx = np.append(mappx, x1)
mapwl = np.append(mapwl, Raman2WL(lambda0, mol[i]))
# fit with a line
popt, _ = curve_fit(line, mappx, mapwl, (1, 0))
# and convert new wavelength axis to wavenumber axis
wn = WL2Raman(lambda0, line(x, *popt))
# plot a nice figure when show == True
if(show):
pl.figure()
xtmp = np.linspace(0, len(y), 10)
pl.plot(xtmp, line(xtmp, *popt), color='k')
pl.plot(mappx, mapwl, "o")
pl.xlabel("Pixel")
pl.ylabel("Wavelength (nm)")
return wn
# -------------------------------------------------------------------------------------------------------------------
# file loading functions
def loadFSRS(basename, wn=None, timesteps=None, excstr="exc*", filteroutliers=False):
"""Load and average all FSRS files matching the basename (e.g. using wildcards).
:param mixed basename: If basename is a `str`, use :py:mod:`glob` to load all matching files using wildcards. If basename is a list, use this list directly without wildcards (only works when no time steps are given).
:param array wn: If a wavenumber axis is given, the first column is interpolated over this axis (default = None).
:param array timesteps: If not None, load excited state data using the given time points. The time value is converted to a string and inserted into a `%s` tag in basename, or appended if no `%s` tag is found. Ground state data is not loaded.
:param str excstr: String that gets appended to the basename to indicate excited state spectra (default = 'exc*').
:param bool filteroutliers: If True, outliers, i.e., invalid data sets, are removed before averaging. An outlier is defined as a spectrum whose sum over amplitude squared deviates from the mean by more than two standard deviations.
:returns: When no time steps are given, return the three columns of the FSRS file averaged over all input spectra, i.e. (Raman gain, probe with pump on, probe with pump off). When time steps are given, return three 2d arrays corresponding to the three columns with timesteps along axis 0 and data along axis 1.
"""
if(timesteps is not None):
alldata = []
# load excited state
for t in timesteps:
if(t <= 0):
tstr = "m%d%s" % (abs(t), excstr)
else:
tstr = "p%d%s" % (t, excstr)
data = 0
if("%s" in basename):
print("load", basename % tstr)
files = glob.glob(basename % tstr) # get all matching files
else:
print("load", basename + tstr)
files = glob.glob(basename + tstr) # get all matching files
if(len(files) < 1):
print("ERROR: No files found!")
return []
tmp = []
for f in files:
tmp.append(np.loadtxt(f, unpack=True))
tmp = np.array(tmp)
# [#][column][data]
if filteroutliers:
data = np.mean(tmp[filter_outliers(tmp[:, 0, :])], axis=0)
else:
data = np.mean(tmp, axis=0)
if(wn is not None):
x, data[0] = interpolate(wn, data[0])
alldata.append(data)
return np.rollaxis(np.array(alldata), 1) # rotate data such that its ordered as [column][timepoint][data]
else:
data = 0
if isinstance(basename, str):
files = glob.glob(basename) # get all matching files
else:
files = basename
if(len(files) < 1):
print("ERROR: No files found!")
return []
tmp = []
for f in files:
tmp.append(np.loadtxt(f, unpack=True))
tmp = np.array(tmp)
# [#][column][data]
if filteroutliers:
data = np.mean(tmp[filter_outliers(tmp[:, 0, :])], axis=0)
else:
data = np.mean(tmp, axis=0)
if(wn is not None):
x, data[0] = interpolate(wn, data[0])
return data
def loadTA(basename, wl=None, timesteps=None, excstr="*", filteroutliers=False):
"""Convenience function for loading transient absorption data. The only difference to :py:func:`loadFSRS` is the naming convention for TA excited state data compared to FSRS data.
.. seealso:: :py:func:`loadFSRS` for list of parameters.
"""
return loadFSRS(basename, wl, timesteps, excstr, filteroutliers)
# use glob to support wildcards and regExp
# loads Raman spectra and returns the averaged data
def loadRaman(filename):
"""Load ASCII Raman (or other) data.
:param str filename: Filename / base name of Raman spectra to load. Supports wildcards and regular expression via :py:mod:`glob`. If filename matches multiple files, these are averaged.
:returns: 2d array containing the (possibly averaged) columns of the input file(s).
"""
files = glob.glob(filename) # get all matching files
if(len(files) < 1):
print("ERROR: No files found!")
return []
tmp = []
for f in files:
tmp.append(np.loadtxt(f, unpack=True))
tmp = np.array(tmp)
data = np.mean(tmp, axis=0)
return data
# -------------------------------------------------------------------------------------------------------------------
# Data filtering / noise improvements
def filter_outliers(y, eps=2.0):
"""Returns a list of indices for datasets that NOT outliers. An outlier is defined as a spectrum whose sum over amplitudes squared deviates from the ensemble average by more than eps times the ensemble standard deviation.
:param array y: A 2d array of spectra, where the first axis is the number of the dataset.
:param float eps: Threshold that defines an outlier in units of standard deviations (default = 2).
:returns: Array of indices of spectra in y that are NOT outliers.
"""
mean = np.mean(y, axis=0)
chi2 = np.sum((y - mean)**2, axis=1)
meanchi2 = np.mean(chi2)
stdchi2 = np.std(chi2)
return np.nonzero(np.absolute(chi2 - meanchi2) <= eps * stdchi2)
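# Illustrative sketch: average repeated scans while discarding outliers; `spectra`
# is assumed to be a 2d array of shape [scan][pixel].
def _example_filter_outliers(spectra):
    good = filter_outliers(spectra, eps=2.0)       # indices of valid scans
    return np.mean(spectra[good], axis=0)          # average only over valid scans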
# taken from http://www.scipy.org/Cookbook/SavitzkyGolay
def savitzky_golay(y, window_size, order, deriv=0):
"""Implementation of the Savitzky Golay filter. Code adapted from http://www.scipy.org/Cookbook/SavitzkyGolay.
:param array y: Data array to be smoothed.
:param int window_size: Length of the smoothing window. Must be an odd integer.
:param int order: Order of the smoothing polynomial. Must be less than window_size - 1.
:param int deriv: Order of the derivative to compute (default = 0).
:returns: Smoothed signal or its derivative. Same shape as y.
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2 # divide and round off
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv]
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m, y, mode='valid')
def noise_estimate(y):
"""Estimate the standard deviation of Gaussian white noise contained in a spectrum. Code is based on Schulze et al., *Appl. Spectrosc.* **60**, 820 (2006).
:param array y: Input spectrum.
:returns: Noise estimate for input spectrum (float).
"""
ws = 21
e0 = 1e8
for i in range(int(y.size / ws)):
e = np.var(np.roll(y[i * ws:(i + 1) * ws], -2)[0:-2] - 2.0 * np.roll(y[i * ws:(i + 1) * ws], -1)[0:-2] + (y[i * ws:(i + 1) * ws])[0:-2]) / 3.0
if(e < e0):
e0 = e
return np.sqrt(e0)
def denoise(y):
"""Fully automatic optimized smoothing algorithm. Code is based on Schulze et al. *Appl. Spectrosc.* **62**, 1160 (2008).
The algorithm uses a repeated application of a 3-pixel zero-order Savitzky-Golay filter until a stopping criterion is fulfilled. This stopping criterion is equivalent to a notable distortion of the signal due to smoothing.
:param array y: Input spectrum. If y is a `list` or a 2d array of spectra, denoise every spectrum in that list.
:returns: Filtered spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(denoise(sp))
return np.array(out)
else:
N = float(len(y))
s = noise_estimate(y) # get stddev of input data
m = np.copy(y)
while(True):
m = savitzky_golay(m, 3, 0) # apply smoothing
if(np.sum((y - m)**2 / s**2) > N):
break
return m
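# Illustrative sketch with synthetic data: a noisy Lorentzian line is smoothed
# repeatedly until the stopping criterion of the Schulze algorithm is met.
def _example_denoise():
    x = np.linspace(-10.0, 10.0, 512)
    np.random.seed(0)                              # reproducible noise
    noisy = 1.0 / (1.0 + x**2) + 0.05 * np.random.randn(x.size)
    return denoise(noisy)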
def FT_denoise(y, cutoff=1, filter='rect'):
"""Apply a Fourier low pass filter to the data to remove high frequency noise.
:param array y: Input spectrum. If y is a `list` or 2d-array of spectra, filter every spectrum.
:param int cutoff: Low pass cutoff position taken from the high frequency side in array indices (0 means no filtering, default = 1).
:param str filter: Type of step function to apply as filter. Currently supported are:
- 'cos2' - A cosine squared.
- 'linear' - A linear onset.
- 'rect' - A rectangular step function (default).
:returns: Filtered spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(FT_denoise(sp, cutoff, filter))
return np.array(out)
else:
# get FFT - use padding to reduce edge effects
ypad = np.pad(y, len(y), mode='reflect', reflect_type='odd')
FT = np.fft.rfft(ypad * np.hanning(len(ypad)))
xtmp = np.arange(0, cutoff)
if filter == 'cos2':
FT[-cutoff:] = FT[-cutoff] * np.cos(4.0 * np.pi * xtmp / cutoff)**2
elif filter == 'linear':
FT[-cutoff:] = FT[-cutoff] / cutoff * np.flipud(xtmp)
else:
FT[-cutoff:] = np.zeros(cutoff)
return np.fft.irfft(FT)[len(y):-len(y)]
# -------------------------------------------------------------------------------------------------------------------
# baseline correction and smoothing functions
def rayleigh_correction(y):
"""Remove the baseline arising from the Rayleigh line by fitting a Gauss-Lorentz curve to the data.
The position of the Rayleigh line (left or right end) is chosen by the amplitude of the spectrum. To reduce the effect of huge Raman peaks, the second order derivative is subtracted from the spectrum before fitting the baseline.
:param array y: Input spectrum. If y is a `list` or 2d-array of spectra, filter every spectrum.
:returns: Spectrum without Rayleigh baseline, same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(rayleigh_correction(sp))
return np.array(out)
else:
# generate pixel axis
x = np.arange(len(y))
# partially remove the peaks
ytmp = y - np.gradient(np.gradient(y))
# fit stripped spectrum by Voigt profile
popt = [ytmp[0], np.amax(ytmp) - np.amin(ytmp), (x[[0, -1]])[np.argmax(ytmp[[0, -1]])], (x[0] - x[-1]) / 10, 0.9]
popt, _ = curve_fit(ft.voigts_const, x, ytmp, popt, maxfev=10000)
# return residuum
return y - ft.voigts_const(x, *popt)
def interpolated_bl_correction(x, y, px, py=None, usedatay=True):
"""Remove a baseline obtained by interpolating a set of fulcrums.
:param array x: x-axis.
:param array y: Input spectrum, same shape as x. If y is a `list` or 2d-array of spectra, filter every spectrum.
:param array px: - If `py != None`: list of x-coordinates of interpolation points.
- If `py == None` and `usedatay == True`: list of x-coordinates of interpolation points.
- If `py == None` and `usedatay == False`: list of x- and y-coordinates of interpolation points in the form [x0, y0, x1, y1, x2, y2, ...].
:param array py: List of y-coordinates of interpolation points (optional).
:param bool usedatay: If True, use the y-data at px for py (default).
:returns: Baseline corrected spectrum.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(interpolated_bl_correction(x, sp, px, py, usedatay))
return np.array(out)
else:
# get interpolation points
if usedatay and py is None:
x0 = px
y0 = interp1d(x, y, 'linear')(x0)
elif not usedatay and py is None:
x0 = px[::2]
y0 = px[1::2]
else:
x0 = px
y0 = py
# make sure the end points are contained in the interpolation
if(np.amin(x0) > np.amin(x)):
x0 = np.append(x0, np.amin(x))
y0 = np.append(y0, y[np.argmin(x)])
if(np.amax(x0) < np.amax(x)):
x0 = np.append(x0, np.amax(x))
y0 = np.append(y0, y[np.argmax(x)])
# make sure the points are correctly sorted for interpolation
s = np.argsort(x0)
x0 = x0[s]
y0 = y0[s]
# return the spectrum minus the interpolated baseline
return y - interp1d(x0, y0, 'cubic')(x)
def baseline_correction(y, n0=2, verbose=False, iterate=True):
"""Automated baseline removal algorithm. Code is based on Schulze et al., *Appl. Spectrosc.* **65**, 75 (2011).
    Works better if the baseline is flatter at the beginning and end of the spectrum. If the divisor is too high, there will be some ringing of the baseline. Sometimes it is better to start with a higher value for n0 to get a good baseline removal, especially when the baseline is wavy and there are strong Raman lines.
More suited for spectra with dominant Raman lines than `FT_baseline_correction`.
:param array y: Input spectrum. If y is a list or a 2d-array of spectra, apply correction to each one.
:param int n0: Initial divisor for window size, i.e. initial window size is size of spectrum divided by n0. Must be at least 1 (default = 2).
:param bool verbose: If True, print final number of iterations and final divisor at the end (default = False).
:param bool iterate: If True, automatically increase the order of the divisor until optimal baseline removal is achieved. If False, just use the value given by n0.
:returns: Baseline corrected spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(baseline_correction(sp, n0, verbose, iterate))
return np.array(out)
else:
if(n0 < 1 or n0 > y.size / 3):
print("n0 is out of range (1, %d)! Set to 1." % (y.size / 3))
n0 = 1
s0 = noise_estimate(y) # get stddev of input data
Npad = len(y) # number of points for padding
ypad = np.pad(y, Npad, mode='reflect', reflect_type='odd') # create padded spectrum to reduce edge effects
N = ypad.size
sblbest = np.sum(ypad**2) # estimate for baseline chi2 best value
blbest = np.zeros(N) # store best baseline estimate
wndiv = n0
while(wndiv < N // 3): # window size reduction
# prepare filter window size
wn = int(N / wndiv)
if(wn % 2 == 0):
wn += 1
y0 = np.copy(ypad) # copy original spectrum
bl = np.zeros(N) # initialize empty baseline
sblold = np.sum(y0**2) # estimate for baseline chi2
cbl = 0
while(cbl < 1000): # baseline estimation
ys1 = denoise(y0) # autosmoothing step
cint = 0
while(cint < 1000): # peak stripping
blint = savitzky_golay(ys1, wn, 0) # intermediate baseline estimate
ys2 = np.where(blint + 2.0 * s0 < ys1, blint, ys1) # replace values bigger than baseline + 2x stdev by baseline
if(np.sum((ys2 - ys1)**2 / s0**2) < N): # stripping changes are below noise level
break # break inner loop
else:
ys1 = np.copy(ys2) # else: proceed with partially stripped spectrum
cint += 1 # increase counter
sbl = np.sum(blint**2)
if(sbl >= sblold and cbl > 2): # chi2 of intermediate baseline has not been reduced after > 2 iterations
break # break second loop
else:
y0 -= blint # set new starting spectrum by subtracting intermediate baseline
bl += blint # add intermediate baseline to total baseline
sblold = np.copy(sbl) # store old value
cbl += 1 # increase counter
if not iterate: # stop here if iterate = False
break
if(sblold < sblbest or wndiv == n0): # could reduce best chi2 or completed just first iteration
sblbest = np.copy(sblold) # new intermediate bl is flatter than previous best intermediate bl
wndiv += 1 # reduce window size
blbest = np.copy(bl) # store new best baseline estimate
else:
break
if verbose:
print("finished with divisor %d after %d iterations" % (wndiv, cbl))
# return baseline corrected spectrum
return (ypad - blbest)[Npad:-Npad]
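# Illustrative sketch with synthetic data: a narrow Raman line sitting on a broad
# Gaussian background; the automated baseline removal strips the background.
def _example_baseline_correction():
    x = np.linspace(0.0, 1000.0, 1000)
    background = 0.5 * np.exp(-(x - 500.0)**2 / (2.0 * 300.0**2))
    peak = np.exp(-(x - 600.0)**2 / (2.0 * 5.0**2))
    np.random.seed(1)
    y = background + peak + 0.01 * np.random.randn(x.size)
    return baseline_correction(y, n0=4)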
def FT_baseline_correction(y, cutoff=None, filter='rect'):
"""Automatic baseline correction based on a Fourier high pass filter after removal of regression line. The stopping criterion for the automatic cutoff search is that the incremental change in the sum over squares should be at least one percent.
This algorithm is best suited for spectra with dominant baselines and very small Raman features.
.. versionchanged:: 04-01-2016
Changed type of padding to reduce artifacts from Fourier transformation.
:param array y: Input spectrum. If y is a list or a 2d array of spectra, correct all spectra.
:param int cutoff: Cutoff frequency for high pass filter:
- If `cutoff == None`, display the Fourier transform of the (first) input spectrum and stop the script.
- If `cutoff > 0`, use cutoff directly for high pass filter.
- If `cutoff == -1`, do an automatic determination of the optimal cutoff.
:param str filter: Type of filter function to use. Currently supported values are:
- 'rect' (default): a rectangular step function.
- 'cos2': a cosine squared.
- 'linear': linear interpolation.
:returns: Baseline corrected spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(FT_baseline_correction(sp, cutoff, filter))
return np.array(out)
else:
wnx = np.arange(0, len(y))
# subtract regression line
line = lambda x, a, m: a * x + m
popt, _ = curve_fit(line, wnx, y, [1, 0])
y = y - line(wnx, *popt)
# get FFT - use padding to reduce edge effects
Npad = len(y) // 2
ypad = np.pad(y, Npad, mode='reflect', reflect_type='odd')
FT = np.fft.rfft(ypad) # no window as function in ypad is periodic
# FT = np.fft.rfft(ypad * np.hanning(len(ypad)))
if(cutoff is None):
pl.figure()
pl.plot(ypad)
pl.figure()
pl.plot(np.absolute(FT))
pl.show()
sys.exit()
elif(cutoff <= 0):
c = 10
chi20 = 1e8
chi2 = np.sum(y**2)
while(abs(chi20 - chi2) / chi20 > 1e-2 and c < len(y)):
c += 1
xtmp = np.arange(0, c)
if filter == 'cos2':
FT[0:c] = FT[c] * np.sin(4.0 * np.pi * xtmp / c)**2
elif filter == 'linear':
FT[0:c] = FT[c] / c * xtmp
else:
FT[0:c] = np.zeros(c)
y1 = np.fft.irfft(FT)[Npad:Npad + len(y)]
chi20 = chi2
chi2 = np.sum(y1**2)
print(c, "iterations, dchi =", abs(chi20 - chi2) / chi20)
return y1
else:
xtmp = np.arange(0, cutoff)
if filter == 'cos2':
FT[0:cutoff] = FT[cutoff] * np.sin(4.0 * np.pi * xtmp / cutoff)**2
elif filter == 'linear':
FT[0:cutoff] = FT[cutoff] / cutoff * xtmp
else:
FT[0:cutoff] = np.zeros(cutoff)
return np.fft.irfft(FT)[Npad:Npad + len(y)]
def MM_baseline_correction(y, w=60):
"""Baseline correction based on a moving median filter.
See Grumstrup et al., *J. Phys. Chem. B* **117**, 8245 (2013) for more details.
This filter tends to reduce the peak height and works best as supplement to any of the other baseline correction functions.
.. versionadded:: 01-04-2016
:param array y: Input spectrum. If y is a list or a 2d array of spectra, correct all spectra.
:param int w: Window size for median filter in indices. Works best for 5x Raman line width.
:returns: Baseline corrected spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(MM_baseline_correction(sp, w))
return np.array(out)
else:
Npad = int(w) // 2
ypad = np.pad(y, Npad, mode='reflect', reflect_type='odd')
N = ypad.size
yout = ypad.copy()
for i in range(Npad, N - Npad):
yout[i] = np.median(ypad[i - Npad:i + Npad])
return y - yout[Npad:-Npad]
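# Illustrative sketch: as suggested in the docstring, the moving-median filter is
# best used on top of another baseline correction; the window of 60 px is assumed
# to be roughly five times the Raman line width of the data at hand.
def _example_combined_baseline(y):
    return MM_baseline_correction(baseline_correction(y), w=60)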
# -------------------------------------------------------------------------------------------------------------------
# solvent/ground state subtraction functions
def solvent_subtract_chi2(y, solvent, scaling='const', shiftx=False):
"""Subtract a solvent or ground state spectrum from a spectrum or list of spectra. The optimum scaling of the spectrum to subtract is found by minimizing the sum over the residual spectrum squared. This function works good if the residual spectrum has weaker peaks than the solvent / ground state spectrum.
:param array y: Input spectrum. If y is a list or 2d array of spectra, apply solvent subtraction to each spectrum.
:param array solvent: Spectrum to subtract (same shape as y).
:param str scaling: Type of scaling function ('const', default, or 'linear'). Linear scaling is used to account for self-absorption effects.
:param bool shiftx: If True, the spectrum to subtract can also be translated along the x-axis (default = False).
:returns: Solvent / ground state corrected spectrum with same shape as y.
"""
# fitting functions
if not shiftx:
f1 = lambda x, a, f0: solvent * a + f0
f2 = lambda x, a, m, f0: solvent * (m * x + a) + f0
else:
f1 = lambda x, a, f0, dx: shift_data(x, solvent, dx) * a + f0
f2 = lambda x, a, m, f0, dx: shift_data(x, solvent, dx) * (m * x + a) + f0
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(solvent_subtract_chi2(sp, solvent, scaling, shiftx))
return np.array(out)
else:
x = np.arange(len(y))
if(scaling == 'const'):
if not shiftx:
popt, _ = curve_fit(f1, x, y, [1.0, 0.0])
else:
popt, _ = curve_fit(f1, x, y, [1.0, 0.0, 0.0])
out = y - f1(x, *popt)
else:
if not shiftx:
popt, _ = curve_fit(f2, x, y, [0.0, 1.0, 0.0])
else:
popt, _ = curve_fit(f2, x, y, [0.0, 1.0, 0.0, 0.0])
out = y - f2(x, *popt)
print("solvent subtract fit results: ", popt)
return out
def solvent_subtract(y, solvent, peaks, scaling='const', type='lor'):
"""Subtract solvent or ground state spectrum from a spectrum or a list of spectra. The optimum scaling of the spectrum to subtract is found by fitting one or more solvent peaks using a Lorentzian or Gaussian. This function works well with spectra having peaks of similar intensity as the solvent spectrum.
:param array y: Input spectrum. If y is a list or 2d array of spectra, apply solvent subtraction to each spectrum.
:param array solvent: Spectrum to subtract (same shape as y).
:param array peaks: Array of tuples, one for each solvent peak, giving the data interval in indices used for fitting. Each peak is fit by a single Lorentzian or Gaussian.
:param str scaling: Type of scaling function ('const', default, or 'linear'). Linear scaling is used to account for self-absorption effects and requires at least two solvent peaks.
:param str type: Type of fit function ('lor', default, or 'gauss').
:returns: Solvent / ground state corrected spectrum with same shape as y.
"""
if np.array(y).ndim > 1:
out = []
for sp in y:
out.append(solvent_subtract(sp, solvent, peaks, scaling, type))
return np.array(out)
else:
x = np.arange(len(y))
if(type == 'lor'):
func = ft.lorentzians_line
else:
func = ft.gaussians_line
areas = np.zeros(len(peaks))
positions = np.zeros(len(peaks))
for i in range(len(peaks)):
poptspe, _ = curve_fit(func, x[peaks[i][0]:peaks[i][1]], y[peaks[i][0]:peaks[i][1]], [y[peaks[i][0]], 0, 1, x[int((peaks[i][0] + peaks[i][1]) / 2)], x[peaks[i][0]] - x[peaks[i][1]]])
poptsol, _ = curve_fit(func, x[peaks[i][0]:peaks[i][1]], solvent[peaks[i][0]:peaks[i][1]], [solvent[peaks[i][0]], 0, 1, x[int((peaks[i][0] + peaks[i][1]) / 2)], x[peaks[i][0]] - x[peaks[i][1]]])
positions[i] = poptsol[3]
areas[i] = np.absolute((poptspe[2] * poptspe[4]) / (poptsol[2] * poptsol[4]))
if(scaling == 'const'):
popt, _ = curve_fit(ft.const, positions, areas, [1.0])
out = y - ft.const(x, *popt) * solvent
else:
popt, _ = curve_fit(ft.line, positions, areas, [1.0, 0.0])
out = y - ft.line(x, *popt) * solvent
print("solvent subtract fit results: ", popt)
return out
# -------------------------------------------------------------------------------------------------------------------
# time zero functions - use in conjunction with cross correlation data/ TA
# translate 1d data by amount dx
# uses padding to reduce edge effects
def shift_data(x, y, dx):
"""Smoothly translate 1d data by an arbitrary amount along the x-axis using Fourier transformation.
:param array x: x-axis.
:param array y: Data, same shape as x.
:param float dx: Delta value.
:returns: Translated data, same shape as y.
.. note:: This function uses padding to reduce edge artefacts. While the output has the same shape as the input, strictly speaking, the `dx / (x[1] - x[0])` first values on the left or right edge (depending on sign of dx) are invalid.
"""
sx = x[1] - x[0]
Npad = max(int(abs(2 * dx / sx)), 1) * 2
ypad = np.pad(y, Npad, mode='reflect', reflect_type='odd')
w = np.fft.rfftfreq(len(ypad), d=sx) * 2.0 * np.pi
ypad = np.fft.irfft(np.fft.rfft(ypad) * np.exp(-1j * w * dx))
return ypad[Npad:len(y) + Npad]
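# Hypothetical usage sketch for shift_data() on a synthetic trace. dx may be a non-integer
# multiple of the sampling step; the Fourier phase factor handles sub-sample translation.
def _example_shift_data():
    import numpy as np
    t = np.linspace(-500.0, 500.0, 1001)      # time axis in fs, 1 fs sampling
    trace = np.exp(-(t / 150.0)**2)           # Gaussian feature centred at t = 0
    return shift_data(t, trace, 12.5)         # translate the feature by +12.5 fs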
# shift all frequency columns along time axis to set t0 (given in c) to 0
# data d is assumed to be in the format [time][wl]
def correct_t0(t, d, c):
"""Shift all frequency columns in d along the time axis to correct t0 to 0.
:param array t: Time axis.
:param array d: 2d array containing spectra vs time.
:param array c: Time t0 for each wavelength / wavenumber, e.g. obtained by cross-correlation.
:returns: Shifted spectra with time t0 at zero, same shape as d.
"""
A = np.copy(d)
# iterate over all frequency columns
for i in range(d.shape[1]):
tmp = shift_data(t, d[:, i], -c[i])
A[:, i] = tmp
return A
# -------------------------------------------------------------------------------------------------------------------
# normalization / data selection functions
# normalize data in y
# mode = max: divide by maximum
# mode = 01: shift minimum to zero before dividing by max
# mode = area: normalize by area
def norm(y, mode='max'):
"""Normalize 1d spectrum.
:param array y: Spectral data.
:param str mode: Type of normalization:
- 'max' - Divide by maximum.
- 'area' - Divide by sum over absolute value.
- '01' - Scale to interval [0, 1] by subtracting offset before dividing by maximum.
:returns: Normalized spectrum with same shape as y.
"""
if mode == 'max':
return y / np.amax(y)
if mode == '01':
y = y - np.amin(y)
return y / np.amax(y)
if mode == 'area':
return y / np.sum(np.absolute(y))
def cut(x, y, x0, x1):
"""Cut out a subarray from x and y according to the *closed* interval [xfrom, xto].
:param array x: x-axis.
:param array y: Data (same shape as x; if 2d array, same shape as x along axis 1).
:param float x0: Lower bound of interval in same units as x.
:param float x1: Upper bound of interval in same units as x.
:returns: Sliced arrays x and y.
.. note:: This function does not perform any interpolation but rather allows slicing of arrays using physical values rather than array indices.
"""
if x0 > x1:
x0, x1 = x1, x0
if x0 == x1:
x1 = x0 + x[1] - x[0]
u = np.compress((x >= x0) & (x <= x1), x)
if np.array(y).ndim > 1:
v = np.compress((x >= x0) & (x <= x1), y, axis=1)
else:
v = np.compress((x >= x0) & (x <= x1), y)
return u, v
def at(x, y, x0):
"""Return the value of y with x-coordinate closest to x0.
:param array x: x-axis.
:param array y: Data (same shape as x).
:param float x0: x-coordinate of desired data point. If x0 is a list or array, return an array of y values.
:returns: Data point with x-coordinate closest to x0.
"""
if isinstance(x0, list) or isinstance(x0, np.ndarray):
out = []
for xp in x0:
out.append(at(x, y, xp))
return np.array(out)
else:
return y[np.argmin(np.absolute(x - x0))]
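# Hypothetical usage sketch chaining norm(), cut() and at(): synthetic single-band spectrum,
# all numbers illustrative only.
def _example_norm_cut_at():
    import numpy as np
    wn = np.linspace(200.0, 1800.0, 801)                  # wavenumber axis (cm-1)
    spec = np.exp(-((wn - 1000.0) / 25.0)**2) + 0.1       # one band on a constant offset
    spec01 = norm(spec, mode='01')                        # rescale to the interval [0, 1]
    wn_band, spec_band = cut(wn, spec01, 900.0, 1100.0)   # slice out the band region
    peak_value = at(wn, spec01, 1000.0)                   # value closest to 1000 cm-1
    return wn_band, spec_band, peak_value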
# -------------------------------------------------------------------------------------------------------------------
# TA analysis functions
def plotTA(t, wl, d, showContour=True, filename=None, xlog=False, vmin=None, vmax=None, cmap=None, xlabel="Time (fs)", ylabel="Wavelength (nm)", zlabel="$\Delta$OD"):
"""Convenience function to generate a fancy TA contour plot using matplotlib's *contour* and *contourf* functions.
:param array t: Time axis.
:param array wl: Wavelength axis.
:param array d: 2d TA data array with shape [time][wavelength].
:param bool showContour: If True, plot black contour lines.
    :param str filename: If not None, save the generated figure to this file.
:param bool xlog: If True, use log scale for time axis.
:param float vmin: Override the automatic color scaling (default = None).
:param float vmax: Override the automatic color scaling (default = None).
:param Colormap cmap: Override Matplotlib colormap to use for the contourplot (default = None).
:param str xlabel: X-label text (default = "Time (fs)").
:param str ylabel: Y-label text (default = "Wavelength (nm)").
:param str zlabel: Z-label text (default = "$\Delta$OD").
"""
tmp = np.flipud(np.rot90(d))
# make a fancy plot
pl.figure()
vmin = np.amin(d) if vmin is None else vmin
vmax = np.amax(d) if vmax is None else vmax
pl.contourf(t, wl, tmp, np.linspace(vmin, vmax, 128), extend='both', cmap=cmap)
cbar = pl.colorbar()
cbar.set_label(zlabel)
if(showContour):
pl.contour(t, wl, tmp, np.linspace(vmin, vmax, 10), linestyles=("-",), linewidths=(0.5,), colors=('k',))
pl.xlabel(xlabel)
pl.ylabel(ylabel)
if xlog:
pl.xscale("log", nonposx='clip', subsx=range(2, 10))
if filename is not None:
pl.savefig(filename)
def bandintegral(x, y, x0, x1):
"""Calculate the band integral over time dependent data.
:param array x: x-axis (wavelength / wavenumber).
:param array y: 2d array containing spectra vs time.
:param float x0: Left boundary of the band integral.
:param float x1: Right boundary of the band integral.
:returns: Band integral as function of time (= axis 1 of y).
"""
i0 = np.argmin(np.absolute(x - x0))
i1 = np.argmin(np.absolute(x - x1))
if i0 > i1:
i0, i1 = i1, i0
return simps(y[:, i0:i1], x[i0:i1], axis=1)
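# Hypothetical usage sketch for bandintegral() on a synthetic TA map. `d` mimics a 2d TA data
# set with shape [time][wavelength]; in practice it would come from a loader such as loadTA().
def _example_bandintegral():
    import numpy as np
    t = np.linspace(0.0, 1000.0, 201)                 # time axis (fs)
    wl = np.linspace(400.0, 700.0, 301)               # wavelength axis (nm)
    band = np.exp(-((wl - 550.0) / 20.0)**2)          # spectral band shape
    decay = np.exp(-t / 300.0)                        # population kinetics
    d = np.outer(decay, band)                         # shape [time][wavelength]
    return bandintegral(wl, d, 520.0, 580.0)          # band integral as a function of time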
def strip_kinetics(y, Nmax=5, verbose=False, tryIRF=True, NyI=5, NyS=20, smooth=True):
"""Automatically remove slow kinetics from a (single) TA trace.
This function uses a minimal number of decaying / rising exponentials that may be convoluted with a Gaussian instrument response function to obtain the best fit to the slow kinetics.
    Parameters are determined automatically by choosing the feature set with the minimal chi2. Currently, this function uses simple least-squares curve fitting with bounds.
If y is a 2d array of TA traces, use every NyS-th row (and column) averaging over NyI rows to reconstruct a smooth TA. The result is subtracted from the original by cubic interpolation over the columns.
:param array y: The kinetics trace as function of time. May be a 2d array or list of traces with shape [time][wavelength] as, for example, returned by :py:func:`loadTA`.
:param int Nmax: The maximum number of exponentials to be used (default = 5).
:param bool verbose: If True, print a list of fitting parameters used for fitting the kinetics trace (default = False).
:param bool tryIRF: If True, also try exponentials convoluted with Gaussian, set to False if there is no step function in your data (default = True).
:param int NyI: Number of rows to integrate for kinetics estimation. Used only for 2d TA traces (default = 5).
:param int NyS: Number of rows/columns to skip for kinetics estimates. Must be > NyI (default = 20).
:param bool smooth: If True, apply denoise to the data for estimation of the kinetics. The returned residuum is not smoothed. For some data, however, denoise will not work properly, in which case it should be set to False.
:returns: Stripped trace or list of traces (with same shape as y), ystripped, kinetics trace or list of kinetics traces (same shape as y), ykinetic.
.. seealso:: See :py:func:`FSRStools.fitting.exponentials` and :py:func:`FSRStools.fitting.exp_gauss` for details on the fitting functions.
The following example uses ``strip_kinetics`` to obtain a 2d Fourier map relating impulsively excited Raman modes with their position in the TA. See, for example, Musser et al., *Nat. Phys.* **11**, 352 (2015) for more details.::
import numpy as np
import FSRStools.raman as fs
# y is a 2d TA map as loaded with loadTA(), wl is the wavelength axis, t is the time axis
# first, strip the slow kinetics from the TA data
ystr, ykin = fs.strip_kinetics(y)
# next, do an |FFT|**2 along the time axis to get the impulsive Raman spectrum
Y = np.absolute(np.fft.rfft2(ystr, axes=(0,)))**2
# get the modes' frequencies in cm-1
wn = np.fft.rfftfreq(len(t), d=t[1] - t[0]) / (2.9979e8 * 1e-13)
# now make a 2d plot using plotTA() - adjust the value of vmax to get a good contrast on the impulsive modes
fs.plotTA(wn, wl, Y, vmax=0.01, showContour=False, xlabel="Wavenumber (cm-1)")
"""
# check some values
if NyI > NyS or NyI <= 0 or NyS <= 0:
raise ValueError("Illegal values for NyI and/or NyS. Must be 0 < NyI < NyS < y.shape[1].")
# check for 2d map
if np.array(y).ndim > 1:
y = np.array(y).T
ytmp = np.arange(0, y.shape[1])
# slice data and do kinetics stripping on each slice
xtmp = np.arange(0, y.shape[0], NyS)
xtmp[[0, -1]] = 0, y.shape[0] - 1
tmp = np.zeros((xtmp.shape[0], ytmp.shape[0]))
for i, xi in enumerate(xtmp):
tmp[i, :] = strip_kinetics(np.mean(y[max(0, xi - NyI // 2):min(y.shape[0] - 1, xi + NyI // 2)], axis=0), Nmax, verbose, tryIRF, NyI, NyS, smooth)[1]
# get kinetics trace by interpolating on 2d grid
ykin = RectBivariateSpline(xtmp, ytmp, tmp)(np.arange(y.shape[0]), np.arange(y.shape[1]))
# remove kinetics from data
# as the kinetics trace is interpolated, the dc removal is not perfect, which is why also the mean value gets subtracted
ystr = (y - ykin).T
ystr = ystr - np.mean(ystr, axis=1)
return ystr, ykin.T
else:
x = np.linspace(0, 1, len(y))
params = []
errs = []
funcs = []
# define a general fitting function
def fitfunc(p, x, y, func):
return np.sum((y - func(x, 0.0, *p))**2) # force offset for both functions to be zero!
y0 = np.copy(y)
# do smoothing spectrum by spectrum, seems more stable
if smooth:
y = denoise(y)
# do the actual fitting
for round in [0, 1]:
if round == 0: # start with 1 to Nmax exponentials without Gaussian instrument response function
func = ft.exponentials
popt = []
bounds = []
elif round == 1: # repeat with Gaussian impulse response function
if not tryIRF:
break
func = ft.exp_gauss
popt = [1.0 / 20.0, x[np.argmin(np.absolute(y[:len(y) // 2] - (np.amax(y) - np.amin(y)) / 2.0))]]
bounds = [(1.0 / 100.0, 1.0), (0, 1)]
for i in range(Nmax):
funcs.append(func)
popt = list(popt) + [1.0, 1.0 / (i + 1)]
bounds = list(bounds) + [(None, None), (1.0 / 25.0, 10.0)]
res = minimize(fitfunc, popt, args=(x, y, func), bounds=bounds)
popt = res.x
params.append(popt)
errs.append(fitfunc(popt, x, y, func) if res.success else np.inf)
# select best guess
iopt = np.nanargmin(errs)
popt = params[iopt]
fopt = funcs[iopt]
yopt = fopt(x, 0.0, *popt)
if verbose:
print "Best fit for kinetics:", "ft.exponentials" if iopt < Nmax else "ft.exp_gauss", "(0.0", popt, ")"
return y0 - yopt, yopt
def TAFFT(t, y, padlength=4, wnd=np.blackman):
"""Calculate the Fourier transform of a 2d transient absorption signal after stripping of the slow kinetics over the time axis.
:param array t: Time axis (fs).
:param array y: 2d TA signal of shape [time][wavelength]. The shape of axis 0 has to match that of t.
:param int padlength: Length of zero padding applied to data before transforming in units of original signal shape (default = 4).
:param function wnd: Window function, f(N) with N being array length, to be applied to data before transforming (default = numpy.blackman).
:returns: Wavenumber axis (cm-1) and transformed data with shape [wavenumber x padlength][wavelength].
"""
y = np.array(y)
if y.ndim != 2:
raise ValueError("TA data has wrong shape.")
Npad = max(1, int(padlength)) * y.shape[0]
Y = np.fft.rfft2(y * np.outer(wnd(y.shape[0]), np.ones(y.shape[1])), s=(Npad,), axes=(0,))
wn = np.fft.rfftfreq(Npad, d=t[1] - t[0]) / (2.9979e8 * 1e-13)
return wn, Y
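# Hypothetical usage sketch: strip_kinetics() followed by TAFFT(). The synthetic map carries a
# weak 100 fs modulation (roughly 333 cm-1) on top of an exponential decay; smoothing and the
# IRF branch are disabled to keep the sketch fast.
def _example_TAFFT():
    import numpy as np
    t = np.arange(0.0, 2000.0, 5.0)                                        # time axis (fs)
    wl = np.linspace(450.0, 650.0, 101)                                    # wavelength axis (nm)
    band = np.exp(-((wl - 550.0) / 30.0)**2)
    kinetics = np.exp(-t / 800.0) * (1.0 + 0.05 * np.cos(2 * np.pi * t / 100.0))
    d = np.outer(kinetics, band)                                           # [time][wavelength]
    ystr, ykin = strip_kinetics(d, tryIRF=False, smooth=False)
    wn, Y = TAFFT(t, ystr)
    return wn, np.absolute(Y)**2                                           # impulsive Raman power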
# -------------------------------------------------------------------------------------------------------------------
# peak fitting functions
def find_peaks(y, wnd=9, ath=0.01, sth=1.0, bl=-1, useMMBL=True, show=False, sorted=False):
"""Automated peak finding algorithm based on zero crossings of smoothed derivative as well as on slope and amplitude thresholds.
.. versionchanged:: 01-04-2016
       Added more control over the type of baseline removal. Now supports `baseline_correction` and `FT_baseline_correction`, optionally followed
       by a moving-median filter step.
:param array y: Spectrum.
:param int wnd: Smoothing window size. Has to be an odd integer (default = 9).
:param float ath: Amplitude threshold in units of maximum amplitude (default = 0.01).
:param float sth: Slope threshold in units of estimated noise standard deviation (default = 1.0).
    :param int bl: Controls baseline removal.
- If `bl > 0`: Initial window size divisor for automated baseline removal.
- If `bl == None`: No baseline removal is performed.
- If `bl == -1`: Perform automated Fourier transform baseline removal (default).
:param bool useMMBL: If True, use a moving-median baseline correction following the initial baseline removal (default).
:param bool show: If True, make a plot showing the extracted peak positions in the spectrum.
:param bool sorted: If True, return sorted by amplitude (highest first). If False, returns sorted by frequency.
:returns: A list of peak position *INDICES*, not wavenumbers.
"""
g = np.copy(y)
# baseline removal
if bl is not None:
if bl > 0:
g = baseline_correction(g, bl)
else:
g = FT_baseline_correction(g, cutoff=-1)
if useMMBL:
g = MM_baseline_correction(g)
# get derivative of smoothed data
dg = np.gradient(savitzky_golay(g, wnd, 0))
# get noise estimate for thresholds
s0 = noise_estimate(g)
gmin = ath * np.amax(g)
smin = sth * s0
# find peaks
peaks = []
gs = []
for i in range(1, dg.size - 1): # leave out end points
# apply criteria: local max, min slope and min amp
if(dg[i] > 0 and dg[i + 1] < 0 and dg[i] - dg[i + 1] > smin and max(g[i], g[i + 1]) > gmin):
if(g[i] > g[i + 1]):
peaks.append(i)
gs.append(g[i])
else:
peaks.append(i + 1)
gs.append(g[i + 1])
peaks = np.array(peaks)
# sort for peak amplitude
if sorted:
peaks = peaks[np.flipud(np.argsort(gs))]
if show:
pl.figure()
pl.plot(g, color='k')
for i in range(len(peaks)):
pl.plot(peaks[i], g[peaks[i]], "o", color="r")
pl.annotate(str(i), (peaks[i], g[peaks[i]]))
return peaks
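# Hypothetical usage sketch for find_peaks() on a noisy synthetic spectrum. Baseline removal is
# switched off (bl=None, useMMBL=False) because the synthetic data carries no baseline; real
# spectra would normally keep the defaults.
def _example_find_peaks():
    import numpy as np
    x = np.arange(2048)
    y = (np.exp(-((x - 600) / 15.0)**2)
         + 0.7 * np.exp(-((x - 1300) / 20.0)**2)
         + 0.01 * np.random.randn(x.size))
    return find_peaks(y, wnd=9, bl=None, useMMBL=False, sorted=True)   # indices, highest first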
# utility function to quickly extract data x and y coordinates by double clicking in the plot
# after the function is executed, the script terminates
# these functions are not to be called directly but act as event handler
pick_peaks_lastclickx = 0
pick_peaks_lastclicky = 0
def pick_peaks_onclick_x_y(event):
global pick_peaks_lastclickx, pick_peaks_lastclicky
if(pick_peaks_lastclickx == event.x and pick_peaks_lastclicky == event.y):
pick_peaks_lastclickx = 0
pick_peaks_lastclicky = 0
print ("%f, %f" % (event.xdata, event.ydata))
else:
pick_peaks_lastclickx = event.x
pick_peaks_lastclicky = event.y
def pick_peaks_onclick(event):
global pick_peaks_lastclickx, pick_peaks_lastclicky
if(pick_peaks_lastclickx == event.x and pick_peaks_lastclicky == event.y):
pick_peaks_lastclickx = 0
pick_peaks_lastclicky = 0
print ("%f," % (event.xdata))
else:
pick_peaks_lastclickx = event.x
pick_peaks_lastclicky = event.y
def pick_peaks(x, y=None):
"""Utility function to quickly extract x coordinates of points from data by double clicking in a plot window.
For each double click, the x coordinates are printed on stdout. After the function is executed, the script terminates.
Use this function to get peak positions as input for :py:func:`get_peak_estimates` or :py:func:`calibrate`, for example.
:param array x: x-axis or data if y is None.
:param array y: Data (optional). If None, the function displays the data vs indices.
.. seealso: :py:func:`pick_peaks_x_y` for extracting x and y coordinates.
"""
pl.figure()
if(y is not None):
pl.plot(x, y)
else:
pl.plot(x)
pl.gcf().canvas.mpl_connect('button_press_event', pick_peaks_onclick)
pl.show()
sys.exit()
# if only x is given, it takes the role of y
def pick_peaks_x_y(x, y=None):
"""Utility function to quickly extract x and y coordinates of points from data by double clicking in a plot window.
For each double click, the x and y coordinates are printed on stdout. After the function is executed, the script terminates.
Use this function to get interpolation points as input for :py:func:`interpolated_bl_correction`, for example.
:param array x: x-axis or data if y is None.
:param array y: Data (optional). If None, the function displays the data vs indices.
.. seealso: :py:func:`pick_peaks` for extracting just x coordinates.
"""
pl.figure()
if(y is not None):
pl.plot(x, y)
else:
pl.plot(x)
pl.gcf().canvas.mpl_connect('button_press_event', pick_peaks_onclick_x_y)
pl.show()
sys.exit()
# get a list of parameters A, x0, dx for each peak whose x coordinate is listed in peaks
# peaks is in same units as x
# x, y is data
def get_peak_estimate(x, y, peaks):
"""Returns a list of estimated peak parameters from a spectrum for each peak whose approximate x-coordinate is listed in peaks. The output can be directly used in the Lorentzian or Gaussian fitting functions or in :py:func:`fit_peaks`.
:param array x: x-axis.
:param array y: Data, same shape as x.
:param array peaks: List with peak positions in same units as x.
:returns: A list of estimated parameters for each peak:
- A amplitude,
- x0 center,
- dx FWHM.
"""
# list for results
est = []
i0 = []
imin = []
imax = []
# get gradient to better pronounce the peaks
dy = np.gradient(y)
# iterate over all guesses and get indices of peaks
for i in range(len(peaks)):
i0.append(np.argmin(np.absolute(x - peaks[i]))) # center index
# get interval containing this peak
if(i != 0):
imin.append(np.argmin(np.absolute(x - (peaks[i] + peaks[i - 1]) / 2)))
else:
imin.append(0) # np.maximum(0, 2 * i0[-1] - imax[-1]))
if(i != len(peaks) - 1):
imax.append(np.argmin(np.absolute(x - (peaks[i] + peaks[i + 1]) / 2)))
else:
imax.append(-1) # np.maximum(0, 2 * i0[-1] - imin[-1]))
# now get the estimates
for i, _ in enumerate(peaks):
i1 = np.argmin(dy[imin[i]:imax[i]] - (dy[imax[i]] - dy[imin[i]]) / (x[imax[i]] - x[imin[i]]) * x[imin[i]:imax[i]]) + imin[i]
i2 = np.argmax(dy[imin[i]:imax[i]] - (dy[imax[i]] - dy[imin[i]]) / (x[imax[i]] - x[imin[i]]) * x[imin[i]:imax[i]]) + imin[i]
delta = np.maximum(np.absolute(i0[i] - i1), np.absolute(i0[i] - i2))
i12 = np.maximum(0, i0[i] - 2 * delta)
i22 = np.minimum(len(x) - 1, i0[i] + 2 * delta)
est.append(y[i0[i]] - 0.5 * (y[i12] + y[i22]))
est.append(x[i0[i]])
est.append(np.absolute(x[i2] - x[i1]))
return np.array(est)
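# Hypothetical usage sketch for get_peak_estimate() from rough peak positions. The approximate
# positions (in x units) would typically come from pick_peaks() or from find_peaks() converted
# to the x axis.
def _example_get_peak_estimate():
    import numpy as np
    x = np.linspace(0.0, 100.0, 1001)
    y = 2.0 * np.exp(-((x - 30.0) / 2.0)**2) + 1.0 * np.exp(-((x - 70.0) / 3.0)**2)
    est = get_peak_estimate(x, y, [30.0, 70.0])
    # est is a flat array [A1, x01, dx1, A2, x02, dx2], directly usable as popt0 in fit_peaks()
    return est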
def fit_peaks(x, y, popt0, fit_func, bounds=None, estimate_bl=False, use_popt0=True, inc_blorder=1, max_blorder=-1, global_opt=True):
"""Try to fit Raman peaks with (automatic) baseline fitting.
:param array x: x-axis.
:param array y: Data. If y is a list or 2d array of spectra, fit every spectrum.
:param array popt0: Initial parameters for fitting, for example obtained from :py:func:`get_peak_estimate`.
:param function fit_func: Fitting function to be used to fit the data. If `estimate_bl` is True, this function should only fit the peaks, not the baseline.
:param array bounds: Array of tuples giving the minimum and maximum allowed value for each parameter in popt0. If not None, use :py:func:`scipy.optimize.differential_evolution` to fit the peaks. (default = None).
    :param bool estimate_bl: If True, attempt an automatic baseline fitting using n-th order polynomials. This baseline is added to any baseline contained in fit_func.
:param bool use_popt0: If fitting multiple spectra, control whether the initial parameters are always the ones provided in `popt0` (True, default) or the ones from the last spectrum (False).
:param int inc_blorder: When fitting the baseline, the order of the polynomial is increased by `inc_blorder` until no further improvement can be achieved (default = 1).
:param int max_blorder: Maximum permitted order of the baseline polynomial. Set to -1 for no restriction (default).
:param bool global_opt: If True, attempt a simultaneous fit of baseline AND peaks once a decent fit for each has been found.
:returns: The fitted spectrum, same shape as y, and the final peak fit parameters (popt0). If y is a list or 2d array of spectra, the returned array has the shape [[yfit], [popt]].
"""
popt_p0 = np.copy(popt0)
if np.array(y).ndim > 1:
out = []
outpopt = []
for sp in y:
            # positional order must match the signature (max_blorder before global_opt)
            tmp, popt = fit_peaks(x, sp, popt_p0, fit_func, bounds, estimate_bl, use_popt0, inc_blorder, max_blorder, global_opt)
out.append(tmp)
outpopt.append(popt)
if not use_popt0:
popt_p0 = np.copy(popt)
return [np.array(out), np.array(outpopt)]
else:
N = len(x)
bl = np.zeros(N)
blorder = 1
popt_bl = np.array([0])
func = lambda p, x, y: (fit_func(x, *p) - y)**2
func_glob = lambda x, *p: fit_func(x, *p[0:len(popt0)]) + ft.poly(x, *p[len(popt0):])
if(estimate_bl):
err1 = np.inf
while(1):
y1 = np.copy(y)
y1old = np.zeros(N)
while(np.sum((y1old - y1)**2) / N > 1e-7):
popt_p = np.copy(popt_p0)
y1old = np.copy(y1)
# fit baseline
popt_bl, pcov_bl = curve_fit(ft.poly, x, y1, np.zeros(blorder))
bl = ft.poly(x, *popt_bl)
y1 = y - bl
# fit peaks
try:
if bounds is None:
popt_p, pcov_p = curve_fit(fit_func, x, y1, popt_p)
else:
popt_p = differential_evolution(func, bounds, args=(x, y1)).x
except:
pass
y1 = y - fit_func(x, *popt_p)
# save error for next order
y1 = fit_func(x, *popt_p) + bl
err = np.sum((y - y1)**2) / N
if(err < err1 and ((max_blorder == -1) or blorder < max_blorder)):
err1 = err
blorder += inc_blorder
else:
break
# try a global optimization of baseline and peaks simultaneously
if global_opt:
popt, pcov = curve_fit(func_glob, x, y, np.append(popt_p0, popt_bl))
popt_p = popt[0:len(popt0)]
pcov_p = pcov[0:len(popt0), 0:len(popt0)]
popt_bl = popt[len(popt0):]
pcov_bl = pcov[len(popt0):, len(popt0):]
elif bounds is not None:
pcov_p = np.zeros((len(popt0), len(popt0)))
else:
popt_p = np.copy(popt_p0)
popt_p, pcov_p = curve_fit(fit_func, x, y, popt_p)
print("Peak fit parameters:")
perr = np.sqrt(np.diag(pcov_p))
for i in range(len(popt_p)):
print(i, ": ", popt_p[i], "+-", perr[i])
if estimate_bl:
print("Baseline fit parameters:")
print("polynomial order = ", blorder)
perr = np.sqrt(np.diag(pcov_bl))
for i in range(len(popt_bl)):
print(i, ": ", popt_bl[i], "+-", perr[i])
return [fit_func(x, *popt_p) + bl, popt_p]
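# Hypothetical usage sketch for fit_peaks() with a locally defined model. A two-Gaussian fit
# function is defined here so that no assumption is made about the exact signatures in
# FSRStools.fitting; any callable of the form f(x, *params) works.
def _example_fit_peaks():
    import numpy as np
    def two_gaussians(x, A1, x01, dx1, A2, x02, dx2):
        return (A1 * np.exp(-4.0 * np.log(2.0) * ((x - x01) / dx1)**2)
                + A2 * np.exp(-4.0 * np.log(2.0) * ((x - x02) / dx2)**2))
    x = np.linspace(0.0, 100.0, 1001)
    y = two_gaussians(x, 2.0, 30.0, 5.0, 1.0, 70.0, 7.0) + 0.02 * np.random.randn(x.size)
    popt0 = get_peak_estimate(x, y, [30.0, 70.0])               # initial guesses
    yfit, popt = fit_peaks(x, y, popt0, two_gaussians, estimate_bl=False)
    return yfit, popt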
def peak_remove(y, peaks):
"""Remove (narrow) peaks like single pixel artefacts from Raman data by linear interpolation.
:param array y: Spectrum.
:param array peaks: List of tuples containing the first and last index of the interval to remove.
:returns: Stripped spectrum with same shape as y.
"""
ytmp = np.copy(y)
for p in peaks:
ytmp[p[0] + 1:p[1]] = (ytmp[p[1]] - ytmp[p[0]]) * (np.arange(p[0] + 1, p[1]) - p[0]) / (p[1] - p[0]) + ytmp[p[0]]
return ytmp
def peak_area(y, peaks):
"""Returns the area under the spectrum in a given interval after removing a linear baseline.
:param array y: Spectrum.
:param array peaks: List of tuples containing the first and last index of the interval to integrate over.
:returns: List of peak areas.
"""
areas = np.zeros(len(peaks))
for i in range(len(peaks)):
p = peaks[i]
areas[i] = simps(y[p[0] + 1:p[1]] - ((y[p[1]] - y[p[0]]) * (np.arange(p[0] + 1, p[1]) - p[0]) / (p[1] - p[0]) + y[p[0]]))
return areas
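# Hypothetical usage sketch for peak_remove() and peak_area() with index windows; the spike and
# band positions are illustrative only.
def _example_peak_remove_area():
    import numpy as np
    y = np.ones(500)
    y[240:260] += 5.0 * np.hanning(20)           # broad band of interest
    y[100] += 40.0                               # single-pixel spike artefact
    cleaned = peak_remove(y, [(98, 102)])        # interpolate linearly across the spike
    areas = peak_area(cleaned, [(230, 270)])     # band area above a linear baseline
    return cleaned, areas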
# -------------------------------------------------------------------------------------------------------------------
# FM modulated FSRS stuff
def reconstruct_FM_spectrum(y, delta, MMw=0, bDenoise=False, blOrder=4):
"""Reconstruct Raman spectrum A from a shifted-excitation of frequency-modulated spectrum A - B.
This algorithm is adapted from Grumstrup et al., *J. Phys. Chem. B* **117**, 8245 (2013).
.. note:: The difference frequency spectrum y has to be sampled equidistantly, i.e., apply :py:func:`interpolate` before taking the difference.
.. versionadded:: 01-04-2016
:param array y: Difference spectrum A-B.
:param int delta: Index shift between the two spectra. Has to be positive and nu(A) < nu(B).
:param int MMw: Window size for moving-median filter (use 5x delta when 0, default).
:param bool bDenoise: If True, apply a denoise step before returning the result.
:param int blOrder: Polynomial order for baseline removal (default = 4).
:returns: Reconstructed spectrum with same shape as y.
"""
# 1. baseline removal by fitting with polynomial
popt, _ = ft.curve_fit(ft.poly, np.arange(len(y)), y, np.zeros(blOrder))
y = y - ft.poly(np.arange(len(y)), *popt)
# 2. sum up
y1 = np.zeros(len(y))
for i, _ in enumerate(y1):
y1[i] = y[i]
if i >= delta:
y1[i] += y1[i - delta]
# 3. apply FT notch filter
Npad = len(y1) // 2
ypad = np.pad(y1, Npad, mode='reflect', reflect_type='odd')
FT = np.fft.fft(ypad * np.hamming(len(ypad)))
alpha = 1.0
beta = delta
f0 = int(len(FT) / delta)
N = int(round(len(FT) / float(f0)))
w = np.ones(len(FT))
nu = np.arange(len(FT))
for i in range(1, N):
w = w * (1.0 - 0.5 * (np.tanh((nu - (f0 * i - beta)) / alpha) + np.tanh(((f0 * i + beta) - nu) / alpha)))
FT = FT * w
y2 = np.fft.ifft(FT)[Npad:Npad + len(y1)]
# 4. apply moving-median filter and maybe smoothing
if MMw <= 1:
MMw = 5 * delta
y3 = MM_baseline_correction(y2, w=MMw)
if bDenoise:
y3 = denoise(y3)
return y3
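# Hypothetical usage sketch for reconstruct_FM_spectrum() on synthetic data. The difference
# spectrum mimics a shifted-excitation measurement where the second excitation is displaced by
# 12 sampling points.
def _example_reconstruct_FM():
    import numpy as np
    x = np.arange(2048)
    spec_A = np.exp(-((x - 800) / 10.0)**2) + 0.5 * np.exp(-((x - 1400) / 15.0)**2)
    spec_B = np.roll(spec_A, 12)                 # same spectrum, shifted by 12 points
    diff = spec_A - spec_B                       # what an FM/shifted-excitation measurement yields
    return reconstruct_FM_spectrum(diff, 12)     # should resemble spec_A up to baseline/filter effects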
| gpl-3.0 |
PatrickChrist/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
marscher/PyEMMA | pyemma/coordinates/clustering/interface.py | 1 | 12476 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 18.02.2015
@author: marscher
'''
from __future__ import absolute_import
import os
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._base.model import Model
from pyemma._base.parallel import NJobsMixIn
from pyemma._ext.sklearn.base import ClusterMixin
from pyemma.coordinates.data._base.transformer import StreamingEstimationTransformer
from pyemma.util.annotators import fix_docs, aliased, alias
from pyemma.util.discrete_trajectories import index_states, sample_indexes_by_state
from pyemma.util.files import mkdir_p
@fix_docs
@aliased
class AbstractClustering(StreamingEstimationTransformer, Model, ClusterMixin, NJobsMixIn, SerializableMixIn):
"""
provides a common interface for cluster algorithms.
Parameters
----------
metric: str, default='euclidean'
metric to pass to c extension
n_jobs: int or None, default=None
        How many threads to use during assignment.
If None, all available CPUs will be used.
"""
def __init__(self, metric='euclidean', n_jobs=None):
super(AbstractClustering, self).__init__()
self.metric = metric
self.clustercenters = None
self._previous_stride = -1
self._dtrajs = []
self._overwrite_dtrajs = False
self._index_states = []
self.n_jobs = n_jobs
__serialize_fields = ('_dtrajs', '_previous_stride', '_index_states', '_overwrite_dtrajs', '_precentered')
__serialize_version = 0
def set_model_params(self, clustercenters):
self.clustercenters = clustercenters
@property
@alias('cluster_centers_') # sk-learn compat.
def clustercenters(self):
""" Array containing the coordinates of the calculated cluster centers. """
return self._clustercenters
@clustercenters.setter
def clustercenters(self, val):
self._clustercenters = np.asarray(val, dtype='float32', order='C')[:] if val is not None else None
self._precentered = False
@property
def overwrite_dtrajs(self):
"""
Should existing dtraj files be overwritten. Set this property to True to overwrite.
"""
return self._overwrite_dtrajs
@overwrite_dtrajs.setter
def overwrite_dtrajs(self, value):
self._overwrite_dtrajs = value
@property
#@alias('labels_') # TODO: for fully sklearn-compat this would have to be a flat array!
def dtrajs(self):
"""Discrete trajectories (assigned data to cluster centers)."""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign(stride=1)
return self._dtrajs # returning what we have saved
@property
def index_clusters(self):
"""Returns trajectory/time indexes for all the clusters
Returns
-------
indexes : list of ndarray( (N_i, 2) )
For each state, all trajectory and time indexes where this cluster occurs.
Each matrix has a number of rows equal to the number of occurrences of the corresponding state,
with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index
within the trajectory.
"""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign()
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self._dtrajs)
return self._index_states
def sample_indexes_by_cluster(self, clusters, nsample, replace=True):
"""Samples trajectory/time indexes according to the given sequence of states.
Parameters
----------
clusters : iterable of integers
It contains the cluster indexes to be sampled
nsample : int
            Number of samples per cluster. If replace = False, the number of returned samples per cluster may be smaller
            if fewer than nsample indexes are available for a cluster.
replace : boolean, optional
Whether the sample is with or without replacement
Returns
-------
indexes : list of ndarray( (N, 2) )
List of the sampled indices by cluster.
Each element is an index array with a number of rows equal to N=len(sequence), with rows consisting of a
tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
"""
# Check if the catalogue (index_states)
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self.dtrajs)
return sample_indexes_by_state(self._index_states[clusters], nsample, replace=replace)
def _transform_array(self, X):
"""get closest index of point in :attr:`clustercenters` to x."""
X = np.require(X, dtype=np.float32, requirements='C')
if not hasattr(self, '_inst'):
self.logger.debug("new cluster inst")
from ._ext import ClusteringBase_f
self._inst = ClusteringBase_f(self.metric, X.shape[1])
# for performance reasons we pre-center the cluster centers for minRMSD.
if self.metric == 'minRMSD' and not self._precentered:
self.logger.debug("precentering cluster centers for minRMSD.")
self._inst.precenter_centers(self.clustercenters)
self._precentered = True
dtraj = self._inst.assign(X, self.clustercenters, self.n_jobs)
res = dtraj[:, None] # always return a column vector in this function
return res
def dimension(self):
"""output dimension of clustering algorithm (always 1)."""
return 1
def output_type(self):
return np.int32()
def assign(self, X=None, stride=1):
"""
Assigns the given trajectory or list of trajectories to cluster centers by using the discretization defined
        by this clustering method (usually a Voronoi tessellation).
You can assign multiple times with different strides. The last result of assign will be saved and is available
as the attribute :func:`dtrajs`.
Parameters
----------
X : ndarray(T, n) or list of ndarray(T_i, n), optional, default = None
Optional input data to map, where T is the number of time steps and n is the number of dimensions.
            When a list is provided, the trajectories can have different numbers of time steps, but the number of dimensions needs
            to be consistent. When X is not provided, the result of assign is identical to get_output(), i.e. the
data used for clustering will be assigned. If X is given, the stride argument is not accepted.
stride : int, optional, default = 1
If set to 1, all frames of the input data will be assigned. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to obtain the discretization at a longer stride.
Note that the stride option used to conduct the clustering is independent of the assign stride.
This argument is only accepted if X is not given.
Returns
-------
Y : ndarray(T, dtype=int) or list of ndarray(T_i, dtype=int)
The discretized trajectory: int-array with the indexes of the assigned clusters, or list of such int-arrays.
If called with a list of trajectories, Y will also be a corresponding list of discrete trajectories
"""
if X is None:
# if the stride did not change and the discrete trajectory is already present,
# just return it
            if self._previous_stride == stride and len(self._dtrajs) > 0:
return self._dtrajs
self._previous_stride = stride
skip = self.skip if hasattr(self, 'skip') else 0
# map to column vectors
mapped = self.get_output(stride=stride, chunk=self.chunksize, skip=skip)
# flatten and save
self._dtrajs = [np.transpose(m)[0] for m in mapped]
# return
return self._dtrajs
else:
if stride != 1:
raise ValueError('assign accepts either X or stride parameters, but not both. If you want to map '+
'only a subset of your data, extract the subset yourself and pass it as X.')
# map to column vector(s)
mapped = self.transform(X)
# flatten
if isinstance(mapped, np.ndarray):
mapped = np.transpose(mapped)[0]
else:
mapped = [np.transpose(m)[0] for m in mapped]
# return
return mapped
def save_dtrajs(self, trajfiles=None, prefix='',
output_dir='.',
output_format='ascii',
extension='.dtraj'):
"""saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
trajfiles : list of str (optional)
names of input trajectory files, will be used generate output files.
prefix : str
prepend prefix to filenames.
output_dir : str
save files to this directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
if extension[0] != '.':
extension = '.' + extension
# obtain filenames from input (if possible, reader is a featurereader)
if output_format == 'ascii':
from msmtools.dtraj import write_discrete_trajectory as write_dtraj
else:
from msmtools.dtraj import save_discrete_trajectory as write_dtraj
import os.path as path
output_files = []
if trajfiles is not None: # have filenames available?
for f in trajfiles:
p, n = path.split(f) # path and file
basename, _ = path.splitext(n)
if prefix != '':
name = "%s_%s%s" % (prefix, basename, extension)
else:
name = "%s%s" % (basename, extension)
# name = path.join(p, name)
output_files.append(name)
else:
for i in range(len(self.dtrajs)):
                if prefix != '':
name = "%s_%i%s" % (prefix, i, extension)
else:
name = str(i) + extension
output_files.append(name)
assert len(self.dtrajs) == len(output_files)
if not os.path.exists(output_dir):
mkdir_p(output_dir)
for filename, dtraj in zip(output_files, self.dtrajs):
dest = path.join(output_dir, filename)
self.logger.debug('writing dtraj to "%s"' % dest)
try:
if path.exists(dest) and not self.overwrite_dtrajs:
raise EnvironmentError('Attempted to write dtraj "%s" which already existed. To automatically'
' overwrite existing files, set source.overwrite_dtrajs=True.' % dest)
write_dtraj(dest, dtraj)
except IOError:
self.logger.exception('Exception during writing dtraj to "%s"' % dest)
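# Hypothetical usage sketch of the interface above. AbstractClustering is not instantiated
# directly; concrete estimators such as pyemma.coordinates.cluster_kmeans (assumed public
# PyEMMA API) return subclasses exposing clustercenters, dtrajs and assign(). Data shapes are
# illustrative only.
def _example_abstract_clustering_usage():
    import numpy as np
    import pyemma.coordinates as coor
    data = [np.random.randn(1000, 2), np.random.randn(500, 2)]   # two toy "trajectories"
    clustering = coor.cluster_kmeans(data, k=10, max_iter=50)    # concrete AbstractClustering subclass
    centers = clustering.clustercenters                          # (10, 2) array of cluster centers
    dtrajs = clustering.dtrajs                                   # lazy assignment of all frames
    strided = clustering.assign(stride=5)                        # re-assign using every 5th frame
    return centers, dtrajs, strided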
| lgpl-3.0 |
ZhangXinNan/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 43 | 3572 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
joshloyal/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
perrygeo/python-rasterstats | tests/test_zonal.py | 1 | 18577 | # test zonal stats
import os
import pytest
import simplejson
import json
import sys
import numpy as np
import rasterio
from rasterstats import zonal_stats, raster_stats
from rasterstats.utils import VALID_STATS
from rasterstats.io import read_featurecollection, read_features
from shapely.geometry import Polygon
from affine import Affine
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
raster = os.path.join(DATA, 'slope.tif')
def test_main():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster)
for key in ['count', 'min', 'max', 'mean']:
assert key in stats[0]
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
assert round(stats[0]['mean'], 2) == 14.66
# remove after band_num alias is removed
def test_band_alias():
polygons = os.path.join(DATA, 'polygons.shp')
stats_a = zonal_stats(polygons, raster)
stats_b = zonal_stats(polygons, raster, band=1)
with pytest.deprecated_call():
stats_c = zonal_stats(polygons, raster, band_num=1)
assert stats_a[0]['count'] == stats_b[0]['count'] == stats_c[0]['count']
def test_zonal_global_extent():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster)
global_stats = zonal_stats(polygons, raster, global_src_extent=True)
assert stats == global_stats
def test_zonal_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, nodata=0)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_doesnt_exist():
nonexistent = os.path.join(DATA, 'DOESNOTEXIST.shp')
with pytest.raises(ValueError):
zonal_stats(nonexistent, raster)
def test_nonsense():
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(ValueError):
zonal_stats("blaghrlargh", raster)
with pytest.raises(IOError):
zonal_stats(polygons, "blercherlerch")
with pytest.raises(ValueError):
zonal_stats(["blaghrlargh", ], raster)
# Different geometry types
def test_points():
points = os.path.join(DATA, 'points.shp')
stats = zonal_stats(points, raster)
# three features
assert len(stats) == 3
# three pixels
assert sum([x['count'] for x in stats]) == 3
assert round(stats[0]['mean'], 3) == 11.386
assert round(stats[1]['mean'], 3) == 35.547
def test_points_categorical():
points = os.path.join(DATA, 'points.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = zonal_stats(points, categorical_raster, categorical=True)
# three features
assert len(stats) == 3
assert 'mean' not in stats[0]
assert stats[0][1.0] == 1
assert stats[1][2.0] == 1
def test_lines():
lines = os.path.join(DATA, 'lines.shp')
stats = zonal_stats(lines, raster)
assert len(stats) == 2
assert stats[0]['count'] == 58
assert stats[1]['count'] == 32
# Test multigeoms
def test_multipolygons():
multipolygons = os.path.join(DATA, 'multipolygons.shp')
stats = zonal_stats(multipolygons, raster)
assert len(stats) == 1
assert stats[0]['count'] == 125
def test_multilines():
multilines = os.path.join(DATA, 'multilines.shp')
stats = zonal_stats(multilines, raster)
assert len(stats) == 1
# can differ slightly based on platform/gdal version
assert stats[0]['count'] in [89, 90]
def test_multipoints():
multipoints = os.path.join(DATA, 'multipoints.shp')
stats = zonal_stats(multipoints, raster)
assert len(stats) == 1
assert stats[0]['count'] == 3
def test_categorical():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = zonal_stats(polygons, categorical_raster, categorical=True)
assert len(stats) == 2
assert stats[0][1.0] == 75
assert 5.0 in stats[1]
def test_categorical_map():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
catmap = {5.0: 'cat5'}
stats = zonal_stats(polygons, categorical_raster,
categorical=True, category_map=catmap)
assert len(stats) == 2
assert stats[0][1.0] == 75
assert 5.0 not in stats[1]
assert 'cat5' in stats[1]
def test_specify_stats_list():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, stats=['min', 'max'])
assert sorted(stats[0].keys()) == sorted(['min', 'max'])
assert 'count' not in list(stats[0].keys())
def test_specify_all_stats():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, stats='ALL')
assert sorted(stats[0].keys()) == sorted(VALID_STATS)
stats = zonal_stats(polygons, raster, stats='*')
assert sorted(stats[0].keys()) == sorted(VALID_STATS)
def test_specify_stats_string():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, stats='min max')
assert sorted(stats[0].keys()) == sorted(['min', 'max'])
assert 'count' not in list(stats[0].keys())
def test_specify_stats_invalid():
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(ValueError):
zonal_stats(polygons, raster, stats='foo max')
def test_optional_stats():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster,
stats='min max sum majority median std')
assert stats[0]['min'] <= stats[0]['median'] <= stats[0]['max']
def test_range():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, stats="range min max")
for stat in stats:
assert stat['range'] == stat['max'] - stat['min']
ranges = [x['range'] for x in stats]
# without min/max specified
stats = zonal_stats(polygons, raster, stats="range")
assert 'min' not in stats[0]
assert ranges == [x['range'] for x in stats]
def test_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = zonal_stats(polygons, categorical_raster, stats="*",
categorical=True, nodata=1.0)
assert stats[0]['majority'] is None
assert stats[0]['count'] == 0 # no pixels; they're all null
assert stats[1]['minority'] == 2.0
assert stats[1]['count'] == 49 # used to be 50 if we allowed 1.0
assert '1.0' not in stats[0]
def test_dataset_mask():
polygons = os.path.join(DATA, 'polygons.shp')
raster = os.path.join(DATA, 'dataset_mask.tif')
stats = zonal_stats(polygons, raster, stats="*")
assert stats[0]['count'] == 75
assert stats[1]['count'] == 0
def test_partial_overlap():
polygons = os.path.join(DATA, 'polygons_partial_overlap.shp')
stats = zonal_stats(polygons, raster, stats="count")
for res in stats:
# each polygon should have at least a few pixels overlap
assert res['count'] > 0
def test_no_overlap():
polygons = os.path.join(DATA, 'polygons_no_overlap.shp')
stats = zonal_stats(polygons, raster, stats="count")
for res in stats:
# no polygon should have any overlap
        assert res['count'] == 0
def test_all_touched():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, all_touched=True)
assert stats[0]['count'] == 95 # 75 if ALL_TOUCHED=False
assert stats[1]['count'] == 73 # 50 if ALL_TOUCHED=False
def test_ndarray_without_affine():
with rasterio.open(raster) as src:
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(ValueError):
zonal_stats(polygons, src.read(1)) # needs affine kwarg
def _assert_dict_eq(a, b):
"""Assert that dicts a and b similar within floating point precision
"""
err = 1e-5
for k in set(a.keys()).union(set(b.keys())):
if a[k] == b[k]:
continue
try:
if abs(a[k]-b[k]) > err:
raise AssertionError("{}: {} != {}".format(k, a[k], b[k]))
except TypeError: # can't take abs, nan
raise AssertionError("{} != {}".format(a[k], b[k]))
def test_ndarray():
with rasterio.open(raster) as src:
arr = src.read(1)
affine = src.transform
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, arr, affine=affine)
stats2 = zonal_stats(polygons, raster)
for s1, s2 in zip(stats, stats2):
_assert_dict_eq(s1, s2)
with pytest.raises(AssertionError):
_assert_dict_eq(stats[0], stats[1])
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
points = os.path.join(DATA, 'points.shp')
stats = zonal_stats(points, arr, affine=affine)
assert stats == zonal_stats(points, raster)
assert sum([x['count'] for x in stats]) == 3
assert round(stats[0]['mean'], 3) == 11.386
assert round(stats[1]['mean'], 3) == 35.547
def test_alias():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster)
with pytest.deprecated_call():
stats2 = raster_stats(polygons, raster)
assert stats == stats2
def test_add_stats():
polygons = os.path.join(DATA, 'polygons.shp')
def mymean(x):
return np.ma.mean(x)
stats = zonal_stats(polygons, raster, add_stats={'mymean': mymean})
for i in range(len(stats)):
assert stats[i]['mean'] == stats[i]['mymean']
def test_add_stats_prop():
polygons = os.path.join(DATA, 'polygons.shp')
def mymean_prop(x, prop):
return np.ma.mean(x) * prop['id']
stats = zonal_stats(polygons, raster, add_stats={'mymean_prop': mymean_prop})
for i in range(len(stats)):
assert stats[i]['mymean_prop'] == stats[i]['mean'] * (i+1)
def test_mini_raster():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, raster_out=True)
stats2 = zonal_stats(polygons, stats[0]['mini_raster_array'],
raster_out=True, affine=stats[0]['mini_raster_affine'])
assert (stats[0]['mini_raster_array'] == stats2[0]['mini_raster_array']).sum() == \
stats[0]['count']
def test_percentile_good():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster,
stats="median percentile_50 percentile_90")
assert 'percentile_50' in stats[0].keys()
assert 'percentile_90' in stats[0].keys()
assert stats[0]['percentile_50'] == stats[0]['median']
assert stats[0]['percentile_50'] <= stats[0]['percentile_90']
def test_zone_func_has_return():
def example_zone_func(zone_arr):
return np.ma.masked_array(np.full(zone_arr.shape, 1))
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons,
raster,
zone_func=example_zone_func)
assert stats[0]['max'] == 1
assert stats[0]['min'] == 1
assert stats[0]['mean'] == 1
def test_zone_func_good():
def example_zone_func(zone_arr):
zone_arr[:] = 0
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons,
raster,
zone_func=example_zone_func)
assert stats[0]['max'] == 0
assert stats[0]['min'] == 0
assert stats[0]['mean'] == 0
def test_zone_func_bad():
not_a_func = 'jar jar binks'
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(TypeError):
zonal_stats(polygons, raster, zone_func=not_a_func)
def test_percentile_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
# By setting nodata to 1, one of our polygons is within the raster extent
# but has an empty masked array
stats = zonal_stats(polygons, categorical_raster,
stats=["percentile_90"], nodata=1)
assert 'percentile_90' in stats[0].keys()
assert [None, 5.0] == [x['percentile_90'] for x in stats]
def test_percentile_bad():
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(ValueError):
zonal_stats(polygons, raster, stats="percentile_101")
def test_json_serializable():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster,
stats=VALID_STATS + ["percentile_90"],
categorical=True)
try:
json.dumps(stats)
simplejson.dumps(stats)
except TypeError:
pytest.fail("zonal_stats returned a list that wasn't JSON-serializable")
def test_direct_features_collections():
polygons = os.path.join(DATA, 'polygons.shp')
features = read_features(polygons)
collection = read_featurecollection(polygons)
stats_direct = zonal_stats(polygons, raster)
stats_features = zonal_stats(features, raster)
stats_collection = zonal_stats(collection, raster)
assert stats_direct == stats_features == stats_collection
def test_all_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
raster = os.path.join(DATA, 'all_nodata.tif')
stats = zonal_stats(polygons, raster, stats=['nodata', 'count'])
assert stats[0]['nodata'] == 75
assert stats[0]['count'] == 0
assert stats[1]['nodata'] == 50
assert stats[1]['count'] == 0
def test_some_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
raster = os.path.join(DATA, 'slope_nodata.tif')
stats = zonal_stats(polygons, raster, stats=['nodata', 'count'])
assert stats[0]['nodata'] == 36
assert stats[0]['count'] == 39
assert stats[1]['nodata'] == 19
assert stats[1]['count'] == 31
# update this if nan end up being incorporated into nodata
def test_nan_nodata():
polygon = Polygon([[0, 0], [2, 0], [2, 2], [0, 2]])
arr = np.array([
[np.nan, 12.25],
[-999, 12.75]
])
affine = Affine(1, 0, 0,
0, -1, 2)
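    # Affine(a, b, c, d, e, f) maps (col, row) -> (a*col + b*row + c,
    # d*col + e*row + f), so this is a north-up grid of 1x1 pixels whose
    # top-left corner sits at (0, 2).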
stats = zonal_stats(polygon, arr, affine=affine, nodata=-999,
stats='nodata count sum mean min max')
assert stats[0]['nodata'] == 1
assert stats[0]['count'] == 2
assert stats[0]['mean'] == 12.5
assert stats[0]['min'] == 12.25
assert stats[0]['max'] == 12.75
def test_some_nodata_ndarray():
polygons = os.path.join(DATA, 'polygons.shp')
raster = os.path.join(DATA, 'slope_nodata.tif')
with rasterio.open(raster) as src:
arr = src.read(1)
affine = src.transform
# without nodata
stats = zonal_stats(polygons, arr, affine=affine, stats=['nodata', 'count', 'min'])
assert stats[0]['min'] == -9999.0
assert stats[0]['nodata'] == 0
assert stats[0]['count'] == 75
# with nodata
stats = zonal_stats(polygons, arr, affine=affine,
nodata=-9999.0, stats=['nodata', 'count', 'min'])
assert stats[0]['min'] >= 0.0
assert stats[0]['nodata'] == 36
assert stats[0]['count'] == 39
def test_transform():
with rasterio.open(raster) as src:
arr = src.read(1)
affine = src.transform
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, arr, affine=affine)
with pytest.deprecated_call():
stats2 = zonal_stats(polygons, arr, transform=affine.to_gdal())
assert stats == stats2
def test_prefix():
polygons = os.path.join(DATA, 'polygons.shp')
stats = zonal_stats(polygons, raster, prefix="TEST")
for key in ['count', 'min', 'max', 'mean']:
assert key not in stats[0]
for key in ['TESTcount', 'TESTmin', 'TESTmax', 'TESTmean']:
assert key in stats[0]
def test_geojson_out():
polygons = os.path.join(DATA, 'polygons.shp')
features = zonal_stats(polygons, raster, geojson_out=True)
for feature in features:
assert feature['type'] == 'Feature'
assert 'id' in feature['properties'] # from orig
assert 'count' in feature['properties'] # from zonal stats
# do not think this is actually testing the line i wanted it to
# since the read_features func for this data type is generating
# the properties field
def test_geojson_out_with_no_properties():
    polygon = Polygon([[0, 0], [0, 0.5], [1, 1.5], [1.5, 2], [2, 2], [2, 0]])
arr = np.array([
[100, 1],
[100, 1]
])
affine = Affine(1, 0, 0,
0, -1, 2)
stats = zonal_stats(polygon, arr, affine=affine, geojson_out=True)
assert 'properties' in stats[0]
for key in ['count', 'min', 'max', 'mean']:
assert key in stats[0]['properties']
assert stats[0]['properties']['mean'] == 34
# remove when copy_properties alias is removed
def test_copy_properties_warn():
polygons = os.path.join(DATA, 'polygons.shp')
# run once to trigger any other unrelated deprecation warnings
# so the test does not catch them instead
stats_a = zonal_stats(polygons, raster)
with pytest.deprecated_call():
stats_b = zonal_stats(polygons, raster, copy_properties=True)
assert stats_a == stats_b
def test_nan_counts():
from affine import Affine
transform = Affine(1, 0, 1, 0, -1, 3)
data = np.array([
[np.nan, np.nan, np.nan],
[0, 0, 0],
[1, 4, 5]
])
# geom extends an additional row to left
geom = 'POLYGON ((1 0, 4 0, 4 3, 1 3, 1 0))'
# nan stat is requested
stats = zonal_stats(geom, data, affine=transform, nodata=0.0, stats="*")
for res in stats:
assert res['count'] == 3 # 3 pixels of valid data
assert res['nodata'] == 3 # 3 pixels of nodata
assert res['nan'] == 3 # 3 pixels of nans
# nan are ignored if nan stat is not requested
stats = zonal_stats(geom, data, affine=transform, nodata=0.0, stats="count nodata")
for res in stats:
assert res['count'] == 3 # 3 pixels of valid data
assert res['nodata'] == 3 # 3 pixels of nodata
assert 'nan' not in res
# Optional tests
def test_geodataframe_zonal():
polygons = os.path.join(DATA, 'polygons.shp')
try:
import geopandas as gpd
df = gpd.read_file(polygons)
if not hasattr(df, '__geo_interface__'):
pytest.skip("This version of geopandas doesn't support df.__geo_interface__")
except ImportError:
pytest.skip("Can't import geopands")
expected = zonal_stats(polygons, raster)
assert zonal_stats(df, raster) == expected
| bsd-3-clause |
lessalgorithm/LESSAlgorithm | simulator.py | 1 | 21572 | # coding: utf-8
# This is a combination of several scripts that download data from the
# NASA NSRDB database and process it.
# The purpose of this code is to run experiments around adaptive multi-tenancy
# driven ENO WSN for an EWSN submission.
import pandas as pd
import time
import math
import simplejson
from NREL import *
import numpy as np
import random
import matplotlib.pyplot as plt
#import matlab.engine
import os
import sys
sys.path.append('energy_prediction')
sys.path.append('eno')
from orchestrator import Orchestrator
from wcewma import WCEWMA
from eno_static import StaticENO
from eno_orchestrator import OrchestratorENO
from eno_less import LESSENO
from eno_kansal import KansalENO
""" Global store of the performance for later graphing """
output_jsons = [] # output file
refSolarPowerVector = [[]]
wcewma_pred_vector = []
# --------------------------------------------------------------------------- #
""" For loading in lighting data for energy harvesting calculation. """
def dfLoad(test):
df = pd.read_csv('datasets/env_data/{}_solarcalc_raw.csv'.format(test),
low_memory=False, index_col=0) # change file name to loop
return df
# --------------------------------------------------------------------------- #
""" This sanitizes the input data, there's some strange temperature artifacts
this removes """
def getTemplist(df):
temperature_list, result = df["Temperature"].tolist(), []
for item in temperature_list:
if (type(item) == str) and item.endswith('.1'):
item = item[:-2]
result.append(float(item))
else:
result.append(float(item))
return result
# --------------------------------------------------------------------------- #
# This function calls the orchestrator to find system requirements and adds
# them to the dataframe. For now it is a placeholder tied to the length of the
# file; it will change to be dynamic.
def sysRequirements(df, test, orchest):
""" This takes the config file dt and multiplies it to be the length of
the dataframe in multiples of a day """
# print("Shape =>", df.shape[0]/ len(orchest))
# N = df.shape[0] / len(orchest)
# df['Orchastration Requirements'] = list(map(lambda x: x*N, orchest))
df['Orchastration Requirements'] = orchest * int(df.shape[0] / len(orchest))
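    # Illustrative sketch of the tiling (assuming, e.g., a 48-slot daily
    # profile and a dataframe covering whole days): a year of half-hour rows
    # (17520) would repeat the 48-entry profile 365 times. The integer
    # division above therefore only works when df.shape[0] is an exact
    # multiple of len(orchest).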
return df
# --------------------------------------------------------------------------- #
# This function works out the energy generation of the target test
def panelEnergyGen(df, test):
DHI_list = df["DHI"].tolist()
DNI_list = df["DNI"].tolist()
Zenith_list = df["Solar Zenith Angle"].tolist()
E_list = [a * (math.cos(math.radians(c))) + b for a, b,
c in zip(DNI_list, DHI_list, Zenith_list)]
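    # E approximates global horizontal irradiance: beam (DNI) projected onto
    # the horizontal via cos(zenith), plus diffuse (DHI). The tilted-surface
    # energy below then combines the beam term (x_list), a sky-diffuse view
    # factor rd and a ground-reflected view factor rr (isotropic-sky model).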
if debug:
print(" => E coefficient has been calculated ")
# reflection coefficients
rd = (1 + (math.cos(math.radians(sensor_angle_H))) / 2)
rr = (1 - (math.cos(math.radians(sensor_angle_H))) / 2)
# Where variable x is DNI*cos(theta)
x_list = [a * (math.cos(math.radians(sensor_angle_V - b)))
for a, b in zip(DNI_list, Zenith_list)]
# energy in w/m2 impinging on the surface of a solar cell
ES_list = [(a + (b * rd) + (foreground_albedo * c * rr))
for a, b, c in zip(x_list, DHI_list, E_list)]
if debug:
print(" => Energy hitting the cell has been calculated")
# including temperature as a function of solar cell efficiency in calculations
temperature_list = getTemplist(df)
tc_list = [(a + (((Noct - 20) / 800) * b))
for a, b in zip(temperature_list, ES_list)]
efficiency_pvg = [((solar_panel_efficiency * power_conditioning_efficiency)
* (1 - temperature_coefficient * (a - ref_cell_temp))) for a in tc_list]
if debug:
print(" => Efficiency of the solar cell over time has been calculated")
# conversion from w/m2 to energy generated by solar panel in mA
EG_list = [(abs(a * (solar_panel_active_area_m2 * b * 1000.00)) / (solar_panel_voltage))
for a, b in zip(ES_list, efficiency_pvg)] # change ABS here, can't be right
df['Energy Solar Gen'] = EG_list
return df
# Function to take in environmental variables and return the generated current
# --------------------------------------------------------------------------- #
def NRELtoWindPower(df):
energy_type = 3
if debug:
print(" => Working out wind parameters")
pressure_list = df["Pressure"].tolist()
    # Assumption: as the system is light, it will have a fan to keep it pointed in the direction of the wind
wind_speed_list = df["Wind Speed"].tolist()
temperature_list = getTemplist(df)
    # 100 here converts millibar to pascal and 273.15 converts Celsius to Kelvin
air_density_list = [((a * 100) / (R_spec * (b + 273.15)))
for a, b in zip(pressure_list, temperature_list)]
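    # Ideal-gas law: rho = p / (R_spec * T). The loop below applies the
    # standard turbine relation P = 0.5 * rho * A * v^3 * Cp within the
    # assumed cut-in/cut-out window (Ve, Vo), divided by the turbine voltage
    # to express the output as current.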
power_e_list = [] # extractable energy by my wind turbine within usable conditions
for a, b in zip(wind_speed_list, air_density_list):
if Ve < a and a < Vo:
temp_power = ((0.5 * b * Area_wind * math.pow(a, 3)
* cp) / wind_turbine_voltage)
power_e_list.append(temp_power)
else:
power_e_list.append(0.0)
df['Energy Wind Gen'] = power_e_list
if debug:
print(" => Finished working out wind parameters")
return df
# Sometimes returns negative values, which probably isn't right - need to check whether the DC-DC converter rectifies negative voltages
# --------------------------------------------------------------------------- #
def NRELtoTEGPower(df):
energy_type = 2
if debug:
print(" => Working out TEG parameters")
temperature_list = getTemplist(df)
vteg_list = []
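    # Open-circuit Seebeck voltage is modelled as N_TEG * seedback_coeff *
    # deltaT; the /4 below is the code's loading factor (constants come from
    # NREL.py). Output current is then approximated as gm * (V_teg - vmin).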
for key in temperature_list:
temp = abs(key - T_ambient)
if temp > 5:
            # Check that a reversed temperature gradient also generates current (with negative voltage) and that the DC/DC stage can handle it
vteg_list.append(
abs(((N_TEG * seedback_coeff) * (key - T_ambient) / 4)))
else:
vteg_list.append(0)
Iout_list = [(gm * (a - vmin)) for a in vteg_list]
# pout_list = [(ef*a*b) for a,b in zip(Iout_list,vteg_list)] # Original equation here says that Vout, make sure VTEG is Vout in this context
df['Energy TEG Gen'] = Iout_list
# PlotNREL(df,location,energy_type)
if debug:
print(" => Finished with TEG calculations")
return df
# --------------------------------------------------------------------------- #
""" This function creates a prediction of current energy generated. This is a
placeholder """
# def createPrediction(df):
# pred, predoutput, GHI_list, length = [0 for j in range(48)], [], df["GHI"].tolist(
# ), 1000 # This is updated at end of time window (next time window)
# for x in range(0, length):
# for a, b in zip(GHI_list, pred):
# predoutput.append((b + a) * 0.5)
# pred, predoutput = predoutput, []
# df['Prediction'] = pred
# return df
# --------------------------------------------------------------------------- #
# This function calculates the performance of a test
def calcPerf(df, test, name):
print("Running ", name);
print("------------------------")
# Calculate metrics for how well system performs
quarter_year_data = len(df['Sense Frequency'].tolist()) // 4
sens_freq_list = df['Sense Frequency'].tolist()
print("sens_freq_list before =>", len(sens_freq_list))
# sens_freq_list = [2] + sens_freq_list
print("sens_freq_list after =>", len(sens_freq_list))
# del sens_freq_list[-1]
# less_graph.append(2)
sens_freq_by_quarter_year = [sens_freq_list[i:i+quarter_year_data]
for i in range(0, len(sens_freq_list), quarter_year_data)]
currentgen_list = df['Energy Generation Total'].tolist()
currentgen_list_by_quarter_year = [currentgen_list[i:i+quarter_year_data]
for i in range(0, len(currentgen_list), quarter_year_data)]
batterylevelflag_list = df['Battery Level Flag'].tolist()
batterylevelflag_by_quarter_year = [batterylevelflag_list[i:i+quarter_year_data]
for i in range(0, len(batterylevelflag_list), quarter_year_data)]
energygensurplus_list = df['Energy Surplus List'].tolist()
energygensurplus_by_quarter_year = [energygensurplus_list[i:i+quarter_year_data]
for i in range(0, len(energygensurplus_list), quarter_year_data)]
orchastPlace_list = df['Orchastration Requirements'].tolist()
orchastPlace_by_quarter_year = [orchastPlace_list[i:i+quarter_year_data]
for i in range(0, len(orchastPlace_list), quarter_year_data)]
# print("orchastPlace_list size =>", len(orchastPlace_list))
# print("sens_freq_list =>", len(sens_freq_list))
for i in range(0,4):
print("(",i,")")
average = round(sum(sens_freq_by_quarter_year[i]) / len(sens_freq_by_quarter_year[i]), 2) # average sensing rate for ENO
dead_metric = batterylevelflag_by_quarter_year[i].count(0)
dead_metric_per = (dead_metric / len(batterylevelflag_by_quarter_year[i])*100)
#waste_metric = batterylevelflag_by_quarter_year[i].count(2)
total_gen = round(sum(currentgen_list_by_quarter_year[i]))
waste_energy = round(sum(energygensurplus_by_quarter_year[i]))
waste_metric_per = waste_energy/total_gen * 100
print(waste_metric_per)
varience = np.var(sens_freq_by_quarter_year[i])
orchestrator_fullfilment = []
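        # Per-slot fulfilment: capped at 100% when the achieved duty cycle
        # meets or exceeds the requested one, otherwise achieved/requested.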
for sense_freq, orch_reqs in zip(sens_freq_by_quarter_year[i],
orchastPlace_by_quarter_year[i]):
if(sense_freq < orch_reqs):
orchest_met_per = (sense_freq / orch_reqs) * 100
else:
orchest_met_per = 100.0
orchestrator_fullfilment.append(orchest_met_per)
# if name == 'LESS':
# print("sense_freq =>", sense_freq, "orch_reqs =>", orch_reqs, "orchest_met_per =>", orchest_met_per);
orchestrator_fullfilment_per = (round(sum(orchestrator_fullfilment) / len(orchestrator_fullfilment), 2))
        # Time the orchestrator requirements were met - need to think about how this is represented (especially over-provisioning)
orchas = []
for a, b in zip(sens_freq_by_quarter_year[i], orchastPlace_by_quarter_year[i]):
orchas.append(a - b)
season = {0: 'jan-march', 1: 'april-jun', 2:'jul-sep', 3:'oct-dec'}
if storage:
output_jsons.append({'source': test, 'test': name, 'season': season[i], 'Dt_average': average, 'variance': varience, 'perTimeDead': dead_metric_per,
'perTimeWasted': waste_metric_per, 'orchFullfilment': orchestrator_fullfilment_per, 'orchas': orchastPlace_list, 'sense_freq': sens_freq_list, 'orchas_diff': orchas})
# Performance here is described as the number of transmissions, time alive, time dead, variance, wasted energy.
# --------------------------------------------------------------------------- #
def dumpData(test):
if output_jsons:
epoch_time = int(time.time())
resultFile = open(
"datasets/results/{}_{}_solartracking_results.json".format(epoch_time, test), 'w+')
simplejson.dump(output_jsons, resultFile)
resultFile.close()
# --------------------------------------------------------------------------- #
def graphData(df):
tests = ['orchas', 'static', 'LESS', 'eno']
static_graph, eno_graph, less_graph, orchas_graph, graph = [], [], [], [], []
# graph.append(min_tx_freq)
for name in tests:
for key in output_jsons:
if key['test'] in "orchas":
orchas_graph.append(key['sense_freq'])
# orchas_graph.append(key['orchas'])
if key['test'] in "eno":
eno_graph.append(key['sense_freq'])
if key['test'] in "LESS":
less_graph.append(key['sense_freq'])
if key['test'] in "static":
static_graph.append(key['sense_freq'])
graph.append(key['orchas'])
print ('\n================================================'
'=================================================')
# index=df.index.get_values()
# plt.plot(orchas_graph[0], c='blue', linewidth=1.5, label='Orchestrator')
# plt.plot(static_graph[0], c='green', linewidth=1.5, label='Static')
plt.plot(eno_graph[0], c='red', linewidth=2.5, label='ENO')
# less_graph[0].pop(0)
# less_graph.append(2)
# plt.plot(less_graph[0], c='orange', linewidth=2.5, label='LESS')
plt.plot(graph[0], '--', linewidth=2.0, c='blue', label='Target')
# plt.plot() plot the orchestration requirement as dotted line TD
legend = plt.legend(loc='upper right', shadow=True)
plt.xlabel('Time Slot, t', {'color': 'black',
'fontsize': 22})
plt.ylabel('Duty Cycle, D_t', {'color': 'black',
'fontsize': 22})
plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=35, ymin=0)
plt.xlim(xmax=350, xmin=0)
plt.show()
# Add labelling automatically
# Change show graph to save graph
# --------------------------------------------------------------------------- #
# Adding function to take care of summing energy sources
def energyGenTotal(df, energy_source):
if debug:
print(" => Calculating Total Energy Production")
solar_list = df["Energy Solar Gen"].tolist()
wind_list = df["Energy Wind Gen"].tolist()
teg_list = df["Energy TEG Gen"].tolist()
currentgen_list = []
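    # Each enabled source ('s', 'w', 't') is scaled by a random draw from its
    # production-variance interval defined in NREL.py; e.g. a (0.9, 1.1)
    # interval would model roughly +/-10% device-to-device variation
    # (illustrative values; the actual bounds come from the config).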
for a, b, c in zip(solar_list, wind_list, teg_list):
x = 0
if "s" in energy_source:
x += (a * (random.uniform(solar_prod_var[0], solar_prod_var[1])))
if "w" in energy_source:
x += (b * (random.uniform(wind_prod_var[0], wind_prod_var[1])))
if "t" in energy_source:
x += (c * (random.uniform(teg_prod_var[0], teg_prod_var[1])))
currentgen_list.append(x)
df['Energy Generation Total'] = currentgen_list
if debug:
print(" => Energy level calculated and added to dataframe")
return df
# --------------------------------------------------------------------------- #
def graphEg(df):
solar_list = df["Energy Solar Gen"].tolist()
wind_list = df["Energy Wind Gen"].tolist()
teg_list = df["Energy TEG Gen"].tolist()
plt.plot(solar_list, c='blue', linewidth=1.5, label='Solar')
plt.plot(wind_list, c='green', linewidth=1.5, label='Wind')
plt.plot(teg_list, c='red', linewidth=1.5, label='TEG')
# legend = plt.legend(loc='upper right', shadow=True)
plt.xlabel('Time Slot, t', fontsize='x-large')
plt.ylabel('Energy Generated (mAh)', fontsize='x-large')
plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=33, ymin=0)
plt.xlim(xmax=350, xmin=0)
plt.show()
# --------------------------------------------------------------------------- #
def plotSolarEgen(df, wvList, wcewma_pred_vector):
# print(wcewma_pred_vector)
solar_list = df["Energy Solar Gen"].tolist()
pre_list = []
for i in range(len(refSolarPowerVector)):
for j in range(len(refSolarPowerVector[i])):
pre_list.append(refSolarPowerVector[i][j])
plt.figure(1)
# plt.subplot(211)
plt.plot(pre_list, c='red', linewidth=1.5, label='Pre')
plt.plot(solar_list, c='blue', linewidth=1.5, label='Real Solar Data')
plt.plot(wcewma_pred_vector, c='green', linewidth=1.5, label='WC-EWMA')
plt.xlabel('Time Slot, t', fontsize='x-large')
plt.ylabel('Energy Generated (mAh)', fontsize='x-large')
# plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=70, ymin=0)
# plt.xlim(xmax=350, xmin=0)
plt.xlim(xmax=366, xmin=0)
# plt.subplot(212)
# x = np.arange(7)
# plt.bar(x, wvList, width=0.4)
# plt.xlabel('Day, t', fontsize='x-large')
# plt.ylabel('Weather volatility', fontsize='x-large')
# plt.gca().yaxis.grid(True)
# plt.minorticks_on
# plt.ylim(ymax=8, ymin=0)
# plt.xlim(xmax=6.5, xmin=-0.5)
plt.show()
# --------------------------------------------------------------------------- #
def plotWeatherVolatility(wvList):
x = np.arange(7)
fig, ax = plt.subplots()
plt.bar(x, wvList)
plt.ylim(ymax=8, ymin=0)
plt.xlim(xmax=7, xmin=-1)
plt.show()
# --------------------------------------------------------------------------- #
# Main
def main(orch_profile, energy_source):
wcewma = WCEWMA(48);
eno_static = StaticENO();
eno_orchestrator = OrchestratorENO();
eno_kansal = KansalENO();
eno_less = LESSENO();
orchest_loop = []
# orchest_loop.append(orchastLamps)
# orchest_loop.append(orchastMicro)
# orchest_loop.append(orchastMulti)
orchest_loop.append(orch_profile)
for orchest in orchest_loop:
# print("orchest =>", orchest)
for dataset in dataset_list:
# print("dataset =>", dataset)
            # Loads environmental variables for the location and time defined in NREL.py. If not available locally, it downloads them from the NREL database and parses them for use.
df = dfLoad(dataset)
if not df.empty:
# Currently only works for one loop because of this line but can be fixed later
if dataset_list.index(dataset) == 0:
df = sysRequirements(df, dataset, orchest)
# Calculates energy generated by solar panel for day
df = panelEnergyGen(df, dataset)
                # calculates wind power production from environmental variables
df = NRELtoWindPower(df)
# calculates thermal energy generation from environmental variables
df = NRELtoTEGPower(df)
# calculates energy total by summing the above
if not energy_source:
energy_source = energy_combination
df = energyGenTotal(df, energy_source)
eno_static.staticWSN(df, dataset)
calcPerf(df, dataset, 'static')
if debug:
print(" => Calculating the static WSN performance")
eno_orchestrator.orchasWSN(df, dataset)
calcPerf(df, dataset, 'orchas')
if debug:
print(" => Calculating the centrally controlled WSN performance")
# eno_kansal.enoWSN(df, dataset)
currentgen_list = df['Energy Generation Total'].tolist()
# print("currentgen_list =>", currentgen_list)
# wcewma_pred_vector = wcewma.compute_wcewma_pred_vector(df)
# print("wcewma_pred_vector =>", wcewma_pred_vector)
eno_kansal.enoBaseline(df, currentgen_list)
calcPerf(df, dataset, 'eno')
if debug:
print(" => Calculating the solely ENO controlled WSN performance")
eno_less.lessWSN(df, dataset)
calcPerf(df, dataset, 'LESS')
if debug:
print(" => Calculating the LESS=MORE WSN performance")
dumpData(dataset)
# if debug:
# print (output_jsons)
# wvList = []
# for i in range(1, 5):
# start_slot = (i - 1) * wcewma.slotPerDayCount
# end_slot = (i * wcewma.slotPerDayCount) - 1
# wvList.append(wcewma.weather_volatility_value(df, start_slot, end_slot, 5, 0.8))
# print("wv(", i,") = ", wvList[i-1])
# # df, cloudiness_degree_threshold, currentDayIndex, currentDayRefSolarPower, weighting_factor
# for i in range(1, 5):
# refSolarPowerVector.insert(i,(wcewma.getNextDayRefSolarPowerVector(df, 3, i, 0.5)))
# wcewma_pred_vector = wcewma.get_wcewma_for_day(df, refSolarPowerVector)
# print("len(wcewma_pred_vector)", len(wcewma_pred_vector))
# print(wcewma_pred_vector)
# plotSolarEgen(df, wvList, wcewma_pred_vector)
# plotWeatherVolatility(wvList)
graphData(df)
# tableData(df)
del output_jsons[:]
# graphEg(df)
orchestrator = Orchestrator()
dir_path = os.path.dirname(os.path.realpath(__file__))
orch_data_loc = dir_path + '/requirements.txt'
app_req_dict = orchestrator.read_app_reqs(orch_data_loc)
main(orchestrator.parse_reqs(("App1", app_req_dict.get("App1"))), "s")
# profile.run('main()') # Run this if you want timing of each run of the code
# Do I want to use the ICDCS style testing. Robustness to different Nw, different energy sources, different battery sizes etc...
# Different requirements ? ? ? or test for multiple dynamic requirements
# figure out how to include the overhead of transmission for the MORE and LESS algorithms at start of time window
# Do we want to include the conservative (min instead of max) LESS algorithm in discussion - Will
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 1 | 33274 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
# Check the old interface
in_warning_message = 'charset'
ca = assert_warns_message(DeprecationWarning, in_warning_message,
CountVectorizer, analyzer='char',
ngram_range=(3, 6),
charset='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
CountVectorizer(vocabulary=[])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
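    # smooth_idf adds one to document frequencies (as if an extra document
    # contained every term once), keeping the idf finite even for terms that
    # appear in every document.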
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
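    # sublinear_tf replaces raw counts tf with 1 + log(tf), so 1, 2, 3 map to
    # 1, ~1.69 and ~2.10 - monotonically increasing but sub-linear, which is
    # exactly what the assertions below check.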
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
assert_false(tv.fixed_vocabulary)
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
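    # (the sign is taken from the signed hash value itself, so colliding
    # features tend to cancel in expectation rather than accumulate)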
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
"""Regression test: max_features didn't work correctly in 0.14."""
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation, which is used in the last
    # grid_search candidate, is considered the best estimator since they all
    # converge to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('l1', 'l2'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation, which is used in the last
    # grid_search candidate, is considered the best estimator since they all
    # converge to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
assert_true(vect.fixed_vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
assert_raises(ValueError, CountVectorizer, vocabulary=vocab)
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
| bsd-3-clause |
xaratustrah/iq_suite | iqtools/plotters.py | 1 | 5836 | """
Collection of plotters
Xaratustrah
2017
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from iqtools.iqbase import IQBase
from iqtools.tools import *
import subprocess
import struct
import os
# ------------ PLOTTERS ----------------------------
def plot_hilbert(x_bar):
"""Show Hilbert plot."""
plt.plot(np.real(x_bar), np.imag(x_bar))
plt.grid(True)
plt.xlabel('Real Part')
plt.ylabel('Imag. Part')
def plot_frame_power(yy, frame_power):
"""
Plot frame power, i.e. trapezoid along each time frame
:param yy:
:param frame_power:
:return:
"""
plt.plot(yy[:, 0], IQBase.get_dbm(frame_power))
plt.ylabel('Power [dBm]')
plt.xlabel('Time [sec]')
plt.title('Frame power')
def plot_spectrogram(xx, yy, zz, cen=0.0, cmap=cm.jet, dpi=300, dbm=False, filename=None, title='Spectrogram'):
"""
Plot the calculated spectrogram
:param xx:
:param yy:
:param zz:
:param cen:
:return:
"""
delta_f = np.abs(np.abs(xx[0, 1]) - np.abs(xx[0, 0]))
delta_t = np.abs(np.abs(yy[1, 0]) - np.abs(yy[0, 0]))
if dbm:
sp = plt.pcolormesh(xx, yy, IQBase.get_dbm(zz), cmap=cmap)
else:
sp = plt.pcolormesh(xx, yy, zz, cmap=cmap)
cb = plt.colorbar(sp)
plt.xlabel(
"Delta f @ {} (resolution = {})".format(get_eng_notation(cen, unit='Hz'), get_eng_notation(delta_f, unit='Hz')))
plt.ylabel('Time [sec] (resolution = {})'.format(
get_eng_notation(delta_t, 's')))
plt.title(title)
if dbm:
cb.set_label('Power Spectral Density [dBm/Hz]')
else:
cb.set_label('Power Spectral Density')
if filename is not None:
plt.savefig(filename + '.png', dpi=dpi, bbox_inches='tight')
plt.close()
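# A minimal usage sketch (synthetic data, not part of the original module):
# build a small time/frequency mesh and hand it to plot_spectrogram(). The
# centre frequency and output file name below are made-up values.
def _demo_plot_spectrogram():
    freqs = np.linspace(-5e3, 5e3, 64)      # frequency offsets [Hz]
    times = np.linspace(0.0, 1.0, 32)       # time axis [s]
    xx, yy = np.meshgrid(freqs, times)
    zz = np.random.rand(*xx.shape)          # fake power spectral density
    plot_spectrogram(xx, yy, zz, cen=400e6, dbm=False,
                     filename='demo_spectrogram')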
def plot_spectrum(f, p, cen=0.0, span=None, dbm=False, filename=None, title='Spectrum'):
"""Plot average power in dBm per Hz"""
    if not span:
        # no span requested: build an all-True mask so every frequency bin is kept
        mask = (f != 0) | (f == 0)
else:
mask = (f <= span / 2) & (f >= -span / 2)
if dbm:
plt.plot(f[mask], IQBase.get_dbm(p[mask]))
else:
plt.plot(f[mask], p[mask])
plt.xlabel("Delta f [Hz] @ {}".format(get_eng_notation(cen, 'Hz')))
plt.title(title)
if dbm:
plt.ylabel('Power Spectral Density [dBm/Hz]')
else:
plt.ylabel('Power Spectral Density')
plt.grid(True)
if filename is not None:
plt.savefig(filename + '.png') # , bbox_inches='tight')
plt.close()
def plot_spectrogram_with_gnuplot(zz):
"""
    zz: reshaped data in the form of a matrix for plotting
based on https://stackoverflow.com/a/15885230/5177935
"""
temp_file = 'foo.bin'
with open(temp_file, 'wb') as foo:
for (i, j), dat in np.ndenumerate(np.rot90(zz, 3)):
s = struct.pack('4f', i, j, dat, dat)
foo.write(s)
gnuplot = subprocess.Popen(
['gnuplot'], stdin=subprocess.PIPE, universal_newlines=True)
gnuplot.stdin.write("""
set pm3d map;
unset clabel;
set terminal png size 1024,768;
set palette defined (0 0.0 0.0 0.5, \
1 0.0 0.0 1.0, \
2 0.0 0.5 1.0, \
3 0.0 1.0 1.0, \
4 0.5 1.0 0.5, \
5 1.0 1.0 0.0, \
6 1.0 0.5 0.0, \
7 1.0 0.0 0.0, \
8 0.5 0.0 0.0 );
""")
gnuplot.stdin.write("set output '{}.png';".format(temp_file))
gnuplot.stdin.write(
"splot '{}' binary record=(10,-1) format='%float' u 1:2:3:4 w pm3d;".format(temp_file))
# the following command needs terminating the process
# os.remove(temp_file)
def plot_phase_shift(x, phase):
"""
Plots the signal before and after the phase shift
"""
plt.rcParams['axes.grid'] = True
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
axs[0, 0].plot(np.real(x))
axs[0, 1].plot(np.imag(x))
axs[1, 0].plot(np.real(shift_phase(x, phase)))
axs[1, 1].plot(np.imag(shift_phase(x, phase)))
def get_iq_object(filename, header_filename):
"""
    Return a suitable reader object according to the file extension.
    Parameters
    ----------
    filename
        Path to the data file; its extension determines the reader class.
    header_filename
        Separate text header, needed for TCAP (.dat) and XDAT (.xdat) files.
    Returns
    -------
    iq_data
        Instance of the matching data class, or None if a required header
        file is missing.
"""
# Object generation
_, file_extension = os.path.splitext(filename)
iq_data = None
if file_extension.lower() == '.txt' or file_extension.lower() == '.csv':
log.info('This is an ASCII file.')
iq_data = ASCIIData(filename)
if file_extension.lower() == '.bin':
log.info('This is a raw binary file.')
iq_data = RAWData(filename)
if file_extension.lower() == '.wav':
log.info('This is a wav file.')
iq_data = WAVData(filename)
if file_extension.lower() == '.iqt':
log.info('This is an iqt file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.iq':
log.info('This is an iq file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.tiq':
log.info('This is a tiq file.')
iq_data = TIQData(filename)
if file_extension.lower() == '.tdms':
log.info('This is a TDMS file.')
iq_data = TDMSData(filename)
if file_extension.lower() == '.dat':
log.info('This is a TCAP file.')
if not header_filename:
log.info('TCAP files need a text header file as well. Aborting....')
return None
else:
iq_data = TCAPData(filename, header_filename)
if file_extension.lower() == '.xdat':
log.info('This is a XDAT file.')
if not header_filename:
log.info('XDAT files need a text header file as well. Aborting....')
return None
else:
iq_data = XDATData(filename, header_filename)
return iq_data
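# A brief usage sketch (hypothetical file name): the reader class is chosen
# purely from the extension, so a .tiq capture comes back as a TIQData object.
# This assumes 'capture.tiq' actually exists on disk.
def _demo_get_iq_object():
    iq_data = get_iq_object('capture.tiq', header_filename=None)
    if iq_data is not None:
        log.info('Loaded a {} reader.'.format(type(iq_data).__name__))
    return iq_data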
| gpl-2.0 |
zhangmianhongni/MyPractice | Python/MachineLearning/ud120-projects-master/feature_selection/find_signature.py | 9 | 1436 | #!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### The words (features) and authors (labels), already largely processed.
### These files should have been created from the previous (Lesson 10)
### mini-project.
words_file = "../text_learning/your_word_data.pkl"
authors_file = "../text_learning/your_email_authors.pkl"
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (the
### remainder go into training)
### feature matrices changed to dense representations for compatibility with
### classifier functions in versions 0.15.2 and earlier
from sklearn import cross_validation
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train)
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features;
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150].toarray()
labels_train = labels_train[:150]
### your code goes here
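### One possible continuation (a sketch, not the official exercise solution):
### fit an overfit-prone decision tree on the tiny training set, check its
### accuracy on the test set and print unusually important features.  The
### choice of classifier and the 0.2 importance threshold are assumptions.
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print "accuracy:", accuracy_score(labels_test, pred)
for index, importance in enumerate(clf.feature_importances_):
    if importance > 0.2:
        print index, vectorizer.get_feature_names()[index], importance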
| apache-2.0 |
rspavel/spack | var/spack/repos/builtin/packages/py-cogent/package.py | 5 | 1573 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCogent(PythonPackage):
"""A toolkit for statistical analysis of biological sequences."""
homepage = "http://pycogent.org"
url = "https://pypi.io/packages/source/c/cogent/cogent-1.9.tar.gz"
version('1.9', sha256='57d8c58e0273ffe4f2b907874f9b49dadfd0600f5507b7666369f4e44d56ce14')
version('1.5.3', url="https://pypi.io/packages/source/c/cogent/cogent-1.5.3.tgz",
sha256='1215ac219070b7b2207b0b47b4388510f3e30ccd88160aa9f02f25d24bcbcd95')
variant('matplotlib', default=False, description="graphs related to codon usage")
variant('mpi', default=False, description='MPI required for parallel computation.')
variant('mysql', default=False, description='Required for the Ensembl querying code.')
depends_on('py-setuptools', type=('build'), when='@1.9')
    depends_on('python@2.6:2.999', type=('build', 'run'))
    depends_on('py-numpy@1.3:', type=('build', 'run'))
depends_on('zlib')
depends_on('py-matplotlib', when='+matplotlib', type=('build', 'run'))
depends_on('py-mpi4py', when='+mpi', type=('build', 'run'))
depends_on('py-sqlalchemy', when='+mysql', type=('build', 'run'))
depends_on('py-pymysql', when='+mysql', type=('build', 'run'))
    depends_on('py-cython@0.17.0:', type='build')
def setup_build_environment(self, env):
env.set('DONT_USE_PYREX', '1')
| lgpl-2.1 |
asgeirrr/word_cloud | doc/sphinxext/gen_rst.py | 17 | 33207 | """
Example generation for the python wordcloud project. Stolen from scikit-learn with modifications from PyStruct.
Generate the rst files for the examples by iterating over the python
example files.
Hacked to plot every example (not only those that start with 'plot').
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import re
import urllib2
import gzip
import posixpath
import codecs
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
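# A tiny usage sketch (not used by the build itself): Tee duplicates writes to
# two file-like objects, e.g. the real stdout and an in-memory buffer.
def _demo_tee():
    buf = StringIO()
    tee = Tee(sys.stdout, buf)
    tee.write('hello from Tee\n')   # shows up on stdout *and* in the buffer
    tee.flush()
    return buf.getvalue()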
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
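# A short usage sketch (hypothetical path and URL): get_data() reads local
# files directly and transparently handles plain or gzip-encoded http bodies.
def _demo_get_data():
    local_text = get_data('README.txt')                       # local file
    remote_text = get_data('http://example.com/index.html')   # fetched over http
    return local_text, remote_text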
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
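# A compact sketch on a hand-written, hypothetical index fragment (real
# searchindex.js files are much larger but expose the same objects/filenames
# sections that this parser looks for).
def _demo_parse_searchindex():
    fragment = 'objects:{"mymod":{"MyClass":[0,0,1,""]}},filenames:["mymod_page"]'
    filenames, objects = parse_sphinx_searchindex(fragment)
    # filenames -> ['mymod_page']; objects -> {'mymod': {'MyClass': [...]}}
    return filenames, objects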
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
'package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
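# A small usage sketch (hypothetical page name): resolve the documentation URL
# of numpy.array against a public doc site.  Network access is required for
# this to return anything.
def _demo_resolve_numpy_array():
    resolver = SphinxDocLinkResolver('http://docs.scipy.org/doc/numpy-1.6.0')
    cobj = {'name': 'array', 'module': 'numpy', 'module_short': 'numpy'}
    return resolver.resolve(cobj, this_url='auto_examples/plot_demo.html')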
###############################################################################
rst_template = """
.. _%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
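# A compact usage sketch (synthetic example file): extract_docstring() returns
# the full module docstring, its first paragraph and the row where code starts.
def _demo_extract_docstring():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    tmp.write('"""\nDemo title\n\nSecond paragraph.\n"""\nx = 1\n')
    tmp.close()
    return extract_docstring(tmp.name)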
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 16px;
top: 0;
left: 0;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
-webkit-transition: all 0.15s ease-out; /* Saf3.2+, Chrome */
-moz-transition: all 0.15s ease-out; /* FF4+ */
-ms-transition: all 0.15s ease-out; /* IE10? */
-o-transition: all 0.15s ease-out; /* Opera 10.5+ */
transition: all 0.15s ease-out;
background-repeat: no-repeat;
/* --> Thumbnail image size */
width: 150px;
height: 130px;
}
.figure img {
display: inline;
}
.figure .caption {
text-align: center !important;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and (check_docstring == True)):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:,0].astype(np.str),
unsorted[:,1].astype(np.float)))
return np.array(unsorted[index][:,0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file'
% src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, plot_gallery)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`%s`
.. raw:: html
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy', 'wordcloud']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
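# A tiny usage sketch reusing the fallback image shipped with the docs: scale
# it into a 200x140 thumbnail, padding with white to keep the aspect ratio.
# The output path is an arbitrary choice for the demo.
def _demo_make_thumbnail():
    make_thumbnail('images/no_image.png', 'images/thumb/no_image_demo.png',
                   200, 140)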
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
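# Two quick examples (standard-library modules, so no extra dependencies):
# 'sep' can be imported straight from 'os', while 'join' only lives in
# 'os.path', so the shortening stops there.
def _demo_short_module_names():
    assert get_short_module_name('os.path', 'sep') == 'os'
    assert get_short_module_name('os.path', 'join') == 'os.path'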
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery:
# generate the plot as png image if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if (not os.path.exists(first_image_file) or
os.stat(first_image_file).st_mtime <=
os.stat(src_file).st_mtime):
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt, '__file__': src_file}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception:
#print 'extracting function failed'
#print err
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
if os.path.exists(first_image_file):
make_thumbnail(first_image_file, thumb_file, 200, 140)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['wordcloud'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['sklearn'] = SphinxDocLinkResolver(
'http://scikit-learn.org/stable')
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with codecs.open(full_fname, 'rt', encoding='utf-8') as fid:
lines_in = fid.readlines()
fid.close()
with open(full_fname, 'wt') as fid:
for line in lines_in:
for name, link in str_repl.iteritems():
try:
line = line.encode("ascii", 'ignore').replace(name, link)
except Exception as e:
print(line)
print(name)
print(link)
raise e
fid.write(line)
fid.close()
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The model is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| mit |
ehealthafrica-ci/onadata | onadata/apps/viewer/tests/test_pandas_mongo_bridge.py | 2 | 29553 | import csv
import os
from tempfile import NamedTemporaryFile
from django.utils.dateparse import parse_datetime
from django.core.urlresolvers import reverse
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.xform_instance_parser import xform_instance_to_dict
from onadata.apps.viewer.pandas_mongo_bridge import AbstractDataFrameBuilder,\
CSVDataFrameBuilder, CSVDataFrameWriter, ExcelWriter,\
get_prefix_from_xpath, get_valid_sheet_name, XLSDataFrameBuilder,\
XLSDataFrameWriter, remove_dups_from_list_maintain_order
from onadata.libs.utils.common_tags import NA_REP
def xls_filepath_from_fixture_name(fixture_name):
"""
Return an xls file path at tests/fixtures/[fixture]/fixture.xls
"""
# TODO currently this only works for fixtures in this app because of
# __file__
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"fixtures", fixture_name, fixture_name + ".xls"
)
def xml_inst_filepath_from_fixture_name(fixture_name, instance_name):
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"fixtures", fixture_name, "instances",
fixture_name + "_" + instance_name + ".xml"
)
class TestPandasMongoBridge(TestBase):
def setUp(self):
self._create_user_and_login()
self._submission_time = parse_datetime('2013-02-18 15:54:01Z')
def _publish_xls_fixture_set_xform(self, fixture):
"""
Publish an xls file at tests/fixtures/[fixture]/fixture.xls
"""
xls_file_path = xls_filepath_from_fixture_name(fixture)
count = XForm.objects.count()
self._publish_xls_file(xls_file_path)
self.assertEqual(XForm.objects.count(), count + 1)
self.xform = XForm.objects.all().reverse()[0]
def _submit_fixture_instance(
self, fixture, instance, submission_time=None):
"""
Submit an instance at
tests/fixtures/[fixture]/instances/[fixture]_[instance].xml
"""
xml_submission_file_path = xml_inst_filepath_from_fixture_name(
fixture, instance)
self._make_submission(
xml_submission_file_path, forced_submission_time=submission_time)
self.assertEqual(self.response.status_code, 201)
def _publish_single_level_repeat_form(self):
self._publish_xls_fixture_set_xform("new_repeats")
self.survey_name = u"new_repeats"
def _publish_nested_repeats_form(self):
self._publish_xls_fixture_set_xform("nested_repeats")
self.survey_name = u"nested_repeats"
def _publish_grouped_gps_form(self):
self._publish_xls_fixture_set_xform("grouped_gps")
self.survey_name = u"grouped_gps"
def _xls_data_for_dataframe(self):
xls_df_builder = XLSDataFrameBuilder(self.user.username,
self.xform.id_string)
cursor = xls_df_builder._query_mongo()
return xls_df_builder._format_for_dataframe(cursor)
def _csv_data_for_dataframe(self):
csv_df_builder = CSVDataFrameBuilder(self.user.username,
self.xform.id_string)
cursor = csv_df_builder._query_mongo()
return csv_df_builder._format_for_dataframe(cursor)
def test_generated_sections(self):
self._publish_single_level_repeat_form()
self._submit_fixture_instance("new_repeats", "01")
xls_df_builder = XLSDataFrameBuilder(self.user.username,
self.xform.id_string)
expected_section_keys = [self.survey_name, u"kids_details"]
section_keys = xls_df_builder.sections.keys()
self.assertEqual(sorted(expected_section_keys), sorted(section_keys))
def test_row_counts(self):
"""
Test the number of rows in each sheet
We expect a single row in the main new_repeats sheet and 2 rows in the
        kids details sheet, one for each repeat
"""
self._publish_single_level_repeat_form()
self._submit_fixture_instance("new_repeats", "01")
data = self._xls_data_for_dataframe()
self.assertEqual(len(data[self.survey_name]), 1)
self.assertEqual(len(data[u"kids_details"]), 2)
def test_xls_columns(self):
"""
Test that our expected columns are in the data
"""
self._publish_single_level_repeat_form()
self._submit_fixture_instance("new_repeats", "01")
data = self._xls_data_for_dataframe()
# columns in the default sheet
expected_default_columns = [
u"gps",
u"_gps_latitude",
u"_gps_longitude",
u"_gps_altitude",
u"_gps_precision",
u"web_browsers/firefox",
u"web_browsers/safari",
u"web_browsers/ie",
u"info/age",
u"web_browsers/chrome",
u"kids/has_kids",
u"info/name",
u"meta/instanceID"
] + AbstractDataFrameBuilder.ADDITIONAL_COLUMNS +\
XLSDataFrameBuilder.EXTRA_COLUMNS
# get the header
default_columns = [k for k in data[self.survey_name][0]]
self.assertEqual(sorted(expected_default_columns),
sorted(default_columns))
# columns in the kids_details sheet
expected_kids_details_columns = [
u"kids/kids_details/kids_name",
u"kids/kids_details/kids_age"
] + AbstractDataFrameBuilder.ADDITIONAL_COLUMNS +\
XLSDataFrameBuilder.EXTRA_COLUMNS
kids_details_columns = [k for k in data[u"kids_details"][0]]
self.assertEqual(sorted(expected_kids_details_columns),
sorted(kids_details_columns))
def test_xls_columns_for_gps_within_groups(self):
"""
Test that a valid xpath is generated for extra gps fields that are NOT
top level
"""
self._publish_grouped_gps_form()
self._submit_fixture_instance("grouped_gps", "01")
data = self._xls_data_for_dataframe()
# columns in the default sheet
expected_default_columns = [
u"gps_group/gps",
u"gps_group/_gps_latitude",
u"gps_group/_gps_longitude",
u"gps_group/_gps_altitude",
u"gps_group/_gps_precision",
u"web_browsers/firefox",
u"web_browsers/safari",
u"web_browsers/ie",
u"web_browsers/chrome",
u"meta/instanceID"
] + AbstractDataFrameBuilder.ADDITIONAL_COLUMNS +\
XLSDataFrameBuilder.EXTRA_COLUMNS
default_columns = [k for k in data[self.survey_name][0]]
self.assertEqual(sorted(expected_default_columns),
sorted(default_columns))
def test_xlsx_output_when_data_exceeds_limits(self):
self._publish_xls_fixture_set_xform("xlsx_output")
self._submit_fixture_instance("xlsx_output", "01")
xls_builder = XLSDataFrameBuilder(username=self.user.username,
id_string=self.xform.id_string)
self.assertEqual(xls_builder.exceeds_xls_limits, True)
# test that the view returns an xlsx file instead
url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 200)
self.assertEqual(self.response["content-type"],
'application/vnd.openxmlformats')
def test_xlsx_export_for_repeats(self):
"""
Make sure exports run fine when the xlsx file has multiple sheets
"""
self._publish_xls_fixture_set_xform("new_repeats")
self._submit_fixture_instance("new_repeats", "01")
XLSDataFrameBuilder(username=self.user.username,
id_string=self.xform.id_string)
# test that the view returns an xlsx file instead
url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
params = {
'xlsx': 'true' # force xlsx
}
self.response = self.client.get(url, params)
self.assertEqual(self.response.status_code, 200)
self.assertEqual(self.response["content-type"],
'application/vnd.openxmlformats')
def test_csv_dataframe_export_to(self):
self._publish_nested_repeats_form()
self._submit_fixture_instance(
"nested_repeats", "01", submission_time=self._submission_time)
self._submit_fixture_instance(
"nested_repeats", "02", submission_time=self._submission_time)
csv_df_builder = CSVDataFrameBuilder(self.user.username,
self.xform.id_string)
temp_file = NamedTemporaryFile(suffix=".csv", delete=False)
csv_df_builder.export_to(temp_file.name)
csv_fixture_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"fixtures", "nested_repeats", "nested_repeats.csv"
)
temp_file.close()
fixture, output = '', ''
with open(csv_fixture_path) as f:
fixture = f.read()
with open(temp_file.name) as f:
output = f.read()
os.unlink(temp_file.name)
self.assertEqual(fixture, output)
def test_csv_columns_for_gps_within_groups(self):
self._publish_grouped_gps_form()
self._submit_fixture_instance("grouped_gps", "01")
data = self._csv_data_for_dataframe()
columns = data[0].keys()
expected_columns = [
u'gps_group/gps',
u'gps_group/_gps_latitude',
u'gps_group/_gps_longitude',
u'gps_group/_gps_altitude',
u'gps_group/_gps_precision',
u'web_browsers/firefox',
u'web_browsers/chrome',
u'web_browsers/ie',
u'web_browsers/safari',
] + AbstractDataFrameBuilder.ADDITIONAL_COLUMNS +\
AbstractDataFrameBuilder.IGNORED_COLUMNS
try:
expected_columns.remove(u'_deleted_at')
except ValueError:
pass
self.maxDiff = None
self.assertEqual(sorted(expected_columns), sorted(columns))
def test_format_mongo_data_for_csv(self):
self.maxDiff = None
self._publish_single_level_repeat_form()
self._submit_fixture_instance("new_repeats", "01")
self.xform.data_dictionary()
data_0 = self._csv_data_for_dataframe()[0]
# remove AbstractDataFrameBuilder.INTERNAL_FIELDS
for key in AbstractDataFrameBuilder.IGNORED_COLUMNS:
if key in data_0:
data_0.pop(key)
for key in AbstractDataFrameBuilder.ADDITIONAL_COLUMNS:
if key in data_0:
data_0.pop(key)
expected_data_0 = {
u'gps': u'-1.2627557 36.7926442 0.0 30.0',
u'_gps_latitude': u'-1.2627557',
u'_gps_longitude': u'36.7926442',
u'_gps_altitude': u'0.0',
u'_gps_precision': u'30.0',
u'kids/has_kids': u'1',
u'info/age': u'80',
u'kids/kids_details[1]/kids_name': u'Abel',
u'kids/kids_details[1]/kids_age': u'50',
u'kids/kids_details[2]/kids_name': u'Cain',
u'kids/kids_details[2]/kids_age': u'76',
u'web_browsers/chrome': True,
u'web_browsers/ie': True,
u'web_browsers/safari': False,
u'web_browsers/firefox': False,
u'info/name': u'Adam',
}
self.assertEqual(expected_data_0, data_0)
def test_split_select_multiples(self):
self._publish_nested_repeats_form()
dd = self.xform.data_dictionary()
self._submit_fixture_instance("nested_repeats", "01")
csv_df_builder = CSVDataFrameBuilder(self.user.username,
self.xform.id_string)
cursor = csv_df_builder._query_mongo()
record = cursor[0]
select_multiples = CSVDataFrameBuilder._collect_select_multiples(dd)
result = CSVDataFrameBuilder._split_select_multiples(record,
select_multiples)
expected_result = {
u'web_browsers/ie': True,
u'web_browsers/safari': True,
u'web_browsers/firefox': False,
u'web_browsers/chrome': False
}
# build a new dictionary only composed of the keys we want to use in
# the comparison
result = dict([(key, result[key]) for key in result.keys() if key in
expected_result.keys()])
self.assertEqual(expected_result, result)
csv_df_builder = CSVDataFrameBuilder(self.user.username,
self.xform.id_string,
binary_select_multiples=True)
result = csv_df_builder._split_select_multiples(record,
select_multiples)
expected_result = {
u'web_browsers/ie': 1,
u'web_browsers/safari': 1,
u'web_browsers/firefox': 0,
u'web_browsers/chrome': 0
}
# build a new dictionary only composed of the keys we want to use in
# the comparison
result = dict([(key, result[key]) for key in result.keys() if key in
expected_result.keys()])
self.assertEqual(expected_result, result)
def test_split_select_multiples_within_repeats(self):
self.maxDiff = None
record = {
'name': 'Tom',
'age': 23,
'browser_use': [
{
'browser_use/year': '2010',
'browser_use/browsers': 'firefox safari'
},
{
'browser_use/year': '2011',
'browser_use/browsers': 'firefox chrome'
}
]
}
expected_result = {
'name': 'Tom',
'age': 23,
'browser_use': [
{
'browser_use/year': '2010',
'browser_use/browsers/firefox': True,
'browser_use/browsers/safari': True,
'browser_use/browsers/ie': False,
'browser_use/browsers/chrome': False
},
{
'browser_use/year': '2011',
'browser_use/browsers/firefox': True,
'browser_use/browsers/safari': False,
'browser_use/browsers/ie': False,
'browser_use/browsers/chrome': True
}
]
}
select_multiples = {
'browser_use/browsers': [
'browser_use/browsers/firefox',
'browser_use/browsers/safari',
'browser_use/browsers/ie',
'browser_use/browsers/chrome']}
result = CSVDataFrameBuilder._split_select_multiples(record,
select_multiples)
self.assertEqual(expected_result, result)
def test_split_gps_fields(self):
record = {
'gps': '5 6 7 8'
}
gps_fields = ['gps']
expected_result = {
'gps': '5 6 7 8',
'_gps_latitude': '5',
'_gps_longitude': '6',
'_gps_altitude': '7',
'_gps_precision': '8',
}
AbstractDataFrameBuilder._split_gps_fields(record, gps_fields)
self.assertEqual(expected_result, record)
def test_split_gps_fields_within_repeats(self):
record = {
'a_repeat': [
{
'a_repeat/gps': '1 2 3 4'
},
{
'a_repeat/gps': '5 6 7 8'
}
]
}
gps_fields = ['a_repeat/gps']
expected_result = {
'a_repeat': [
{
'a_repeat/gps': '1 2 3 4',
'a_repeat/_gps_latitude': '1',
'a_repeat/_gps_longitude': '2',
'a_repeat/_gps_altitude': '3',
'a_repeat/_gps_precision': '4',
},
{
'a_repeat/gps': '5 6 7 8',
'a_repeat/_gps_latitude': '5',
'a_repeat/_gps_longitude': '6',
'a_repeat/_gps_altitude': '7',
'a_repeat/_gps_precision': '8',
}
]
}
AbstractDataFrameBuilder._split_gps_fields(record, gps_fields)
self.assertEqual(expected_result, record)
def test_unicode_export(self):
unicode_char = unichr(40960)
# fake data
data = [{"key": unicode_char}]
columns = ["key"]
# test xls
xls_df_writer = XLSDataFrameWriter(data, columns)
temp_file = NamedTemporaryFile(suffix=".xls")
excel_writer = ExcelWriter(temp_file.name)
passed = False
try:
xls_df_writer.write_to_excel(excel_writer, "default")
passed = True
except UnicodeEncodeError:
pass
finally:
temp_file.close()
self.assertTrue(passed)
# test csv
passed = False
csv_df_writer = CSVDataFrameWriter(data, columns)
temp_file = NamedTemporaryFile(suffix=".csv")
try:
csv_df_writer.write_to_csv(temp_file)
passed = True
except UnicodeEncodeError:
pass
finally:
temp_file.close()
temp_file.close()
self.assertTrue(passed)
def test_repeat_child_name_matches_repeat(self):
"""
ParsedInstance.to_dict creates a list within a repeat if a child has
        the same name as the repeat. This test makes sure that doesn't happen
"""
self.maxDiff = None
fixture = "repeat_child_name_matches_repeat"
# publish form so we have a dd to pass to xform inst. parser
self._publish_xls_fixture_set_xform(fixture)
submission_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"fixtures", fixture, fixture + ".xml"
)
# get submission xml str
with open(submission_path, "r") as f:
xml_str = f.read()
dict = xform_instance_to_dict(xml_str, self.xform.data_dictionary())
expected_dict = {
u'test_item_name_matches_repeat': {
u'formhub': {
u'uuid': u'c911d71ce1ac48478e5f8bac99addc4e'
},
u'gps': [
{
u'info': u'Yo',
u'gps': u'-1.2625149 36.7924478 0.0 30.0'
},
{
u'info': u'What',
u'gps': u'-1.2625072 36.7924328 0.0 30.0'
}
]
}
}
self.assertEqual(dict, expected_dict)
def test_remove_dups_from_list_maintain_order(self):
l = ["a", "z", "b", "y", "c", "b", "x"]
result = remove_dups_from_list_maintain_order(l)
expected_result = ["a", "z", "b", "y", "c", "x"]
self.assertEqual(result, expected_result)
def test_valid_sheet_name(self):
sheet_names = ["sheet_1", "sheet_2"]
desired_sheet_name = "sheet_3"
expected_sheet_name = "sheet_3"
generated_sheet_name = get_valid_sheet_name(desired_sheet_name,
sheet_names)
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_invalid_sheet_name(self):
sheet_names = ["sheet_1", "sheet_2"]
desired_sheet_name = "sheet_3_with_more_than_max_expected_length"
expected_sheet_name = "sheet_3_with_more_than_max_exp"
generated_sheet_name = get_valid_sheet_name(desired_sheet_name,
sheet_names)
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_duplicate_sheet_name(self):
sheet_names = ["sheet_2_with_duplicate_sheet_n",
"sheet_2_with_duplicate_sheet_1"]
duplicate_sheet_name = "sheet_2_with_duplicate_sheet_n"
expected_sheet_name = "sheet_2_with_duplicate_sheet_2"
generated_sheet_name = get_valid_sheet_name(duplicate_sheet_name,
sheet_names)
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_query_mongo(self):
"""
Test querying for record count and records using
AbstractDataFrameBuilder._query_mongo
"""
self._publish_single_level_repeat_form()
# submit 3 instances
for i in range(3):
self._submit_fixture_instance("new_repeats", "01")
df_builder = XLSDataFrameBuilder(self.user.username,
self.xform.id_string)
record_count = df_builder._query_mongo(count=True)
self.assertEqual(record_count, 3)
cursor = df_builder._query_mongo()
records = [record for record in cursor]
self.assertTrue(len(records), 3)
# test querying using limits
cursor = df_builder._query_mongo(start=2, limit=2)
records = [record for record in cursor]
self.assertTrue(len(records), 1)
def test_prefix_from_xpath(self):
xpath = "parent/child/grandhild"
prefix = get_prefix_from_xpath(xpath)
self.assertEqual(prefix, 'parent/child/')
xpath = "parent/child"
prefix = get_prefix_from_xpath(xpath)
self.assertEqual(prefix, 'parent/')
xpath = "parent"
prefix = get_prefix_from_xpath(xpath)
self.assertTrue(prefix is None)
def test_csv_export_with_df_size_limit(self):
"""
        To work around the pandas limitation of 30k rows on csv export, we
        specify a max number of records per dataframe on export - let's test it
"""
self._publish_single_level_repeat_form()
# submit 7 instances
for i in range(4):
self._submit_fixture_instance("new_repeats", "01")
self._submit_fixture_instance("new_repeats", "02")
for i in range(2):
self._submit_fixture_instance("new_repeats", "01")
csv_df_builder = CSVDataFrameBuilder(self.user.username,
self.xform.id_string)
record_count = csv_df_builder._query_mongo(count=True)
self.assertEqual(record_count, 7)
temp_file = NamedTemporaryFile(suffix=".csv", delete=False)
csv_df_builder.export_to(temp_file.name, data_frame_max_size=3)
csv_file = open(temp_file.name)
csv_reader = csv.reader(csv_file)
header = csv_reader.next()
self.assertEqual(
len(header), 17 + len(AbstractDataFrameBuilder.ADDITIONAL_COLUMNS))
rows = []
for row in csv_reader:
rows.append(row)
self.assertEqual(len(rows), 7)
self.assertEqual(rows[4][5], NA_REP)
# close and delete file
csv_file.close()
os.unlink(temp_file.name)
def test_csv_column_indices_in_groups_within_repeats(self):
self._publish_xls_fixture_set_xform("groups_in_repeats")
self._submit_fixture_instance("groups_in_repeats", "01")
dd = self.xform.data_dictionary()
dd.get_keys()
data_0 = self._csv_data_for_dataframe()[0]
# remove dynamic fields
ignore_list = [
'_uuid', 'meta/instanceID', 'formhub/uuid', '_submission_time',
'_id', '_bamboo_dataset_id']
for item in ignore_list:
data_0.pop(item)
expected_data_0 = {
u'_xform_id_string': u'groups_in_repeats',
u'_status': u'submitted_via_web',
u'_tags': u'',
u'_notes': u'',
u'name': u'Abe',
u'age': u'88',
u'has_children': u'1',
u'_attachments': [],
u'children[1]/childs_info/name': u'Cain',
u'children[2]/childs_info/name': u'Abel',
u'children[1]/childs_info/age': u'56',
u'children[2]/childs_info/age': u'48',
u'children[1]/immunization/immunization_received/polio_1': True,
u'children[1]/immunization/immunization_received/polio_2': False,
u'children[2]/immunization/immunization_received/polio_1': True,
u'children[2]/immunization/immunization_received/polio_2': True,
u'web_browsers/chrome': True,
u'web_browsers/firefox': False,
u'web_browsers/ie': False,
u'web_browsers/safari': False,
u'gps': u'-1.2626156 36.7923571 0.0 30.0',
u'_geolocation': [u'-1.2626156', u'36.7923571'],
u'_gps_latitude': u'-1.2626156',
u'_gps_longitude': u'36.7923571',
u'_gps_altitude': u'0.0',
u'_gps_precision': u'30.0',
}
self.maxDiff = None
self.assertEqual(data_0, expected_data_0)
# todo: test nested repeats as well on xls
def test_xls_groups_within_repeats(self):
self._publish_xls_fixture_set_xform("groups_in_repeats")
self._submit_fixture_instance("groups_in_repeats", "01")
dd = self.xform.data_dictionary()
dd.get_keys()
data = self._xls_data_for_dataframe()
# remove dynamic fields
ignore_list = [
'_uuid', 'meta/instanceID', 'formhub/uuid', '_submission_time',
'_id', '_bamboo_dataset_id']
for item in ignore_list:
# pop unwanted keys from main section
for d in data["groups_in_repeats"]:
if item in d:
d.pop(item)
# pop unwanted keys from children's section
for d in data["children"]:
if item in d:
d.pop(item)
# todo: add _id to xls export
expected_data = {
u"groups_in_repeats":
[
{
u'picture': None,
u'has_children': u'1',
u'name': u'Abe',
u'age': u'88',
u'web_browsers/chrome': True,
u'web_browsers/safari': False,
u'web_browsers/ie': False,
u'web_browsers/firefox': False,
u'gps': u'-1.2626156 36.7923571 0.0 30.0',
u'_gps_latitude': u'-1.2626156',
u'_gps_longitude': u'36.7923571',
u'_gps_altitude': u'0.0',
u'_gps_precision': u'30.0',
u'_index': 1,
u'_parent_table_name': None,
u'_parent_index': -1,
u'_tags': [],
u'_notes': []
}
],
u"children": [
{
u'children/childs_info/name': u'Cain',
u'children/childs_info/age': u'56',
u'children/immunization/immunization_received/polio_1':
True,
u'children/immunization/immunization_received/polio_2':
False,
u'_index': 1,
u'_parent_table_name': u'groups_in_repeats',
u'_parent_index': 1,
},
{
u'children/childs_info/name': u'Able',
u'children/childs_info/age': u'48',
u'children/immunization/immunization_received/polio_1':
True,
u'children/immunization/immunization_received/polio_2':
True,
u'_index': 2,
u'_parent_table_name': u'groups_in_repeats',
u'_parent_index': 1,
}
]
}
self.maxDiff = None
self.assertEqual(
data["groups_in_repeats"][0],
expected_data["groups_in_repeats"][0])
        # each of the children should have children/... keys; we can't
        # guarantee the order so we can't check the values, just make sure
        # they are not None
self.assertEqual(len(data["children"]), 2)
for child in data["children"]:
self.assertTrue("children/childs_info/name" in child)
self.assertIsNotNone(child["children/childs_info/name"])
self.assertTrue("children/childs_info/age" in child)
self.assertIsNotNone(child["children/childs_info/name"])
self.assertTrue(
"children/immunization/immunization_received/polio_1" in child)
self.assertEqual(type(child[
"children/immunization/immunization_received/polio_1"]), bool)
self.assertTrue(
"children/immunization/immunization_received/polio_2" in child)
self.assertEqual(type(child[
"children/immunization/immunization_received/polio_2"]), bool)
| bsd-2-clause |
larsmans/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 31 | 6147 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
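# A minimal sketch (toy numbers, not part of the original tests) of one
# passive-aggressive "hinge" update as coded above:
# step = min(C, loss / ||x||^2), then w <- w + step * y * x.
def _demo_single_pa_update():
    x_i = np.array([1.0, 2.0])
    y_i = 1
    w = np.zeros(2)
    p = np.dot(w, x_i)                          # prediction is 0
    loss = max(1 - y_i * p, 0)                  # hinge loss is 1
    step = min(1.0, loss / np.dot(x_i, x_i))    # min(1, 1/5) = 0.2
    w += step * y_i * x_i                       # w becomes [0.2, 0.4]
    return w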
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
"""Classifier can be retrained on different labels and features."""
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
maginatics/swift | swift/common/middleware/x_profile/html_viewer.py | 15 | 21038 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import os
import random
import re
import string
import tempfile
from swift import gettext_ as _
from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\
NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException
from profile_model import Stats2
PLOTLIB_INSTALLED = True
try:
import matplotlib
# use agg backend for writing to file, not for rendering in a window.
# otherwise some platform will complain "no display name and $DISPLAY
# environment variable"
matplotlib.use('agg')
import matplotlib.pyplot as plt
except ImportError:
PLOTLIB_INSTALLED = False
empty_description = """
The default profile of the current process or the profile you requested is
empty. <input type="submit" name="refresh" value="Refresh"/>
"""
profile_tmpl = """
<select name="profile">
<option value="current">current</option>
<option value="all">all</option>
${profile_list}
</select>
"""
sort_tmpl = """
<select name="sort">
<option value="time">time</option>
<option value="cumulative">cumulative</option>
<option value="calls">calls</option>
<option value="pcalls">pcalls</option>
<option value="name">name</option>
<option value="file">file</option>
<option value="module">module</option>
<option value="line">line</option>
<option value="nfl">nfl</option>
<option value="stdname">stdname</option>
</select>
"""
limit_tmpl = """
<select name="limit">
<option value="-1">all</option>
<option value="0.1">10%</option>
<option value="0.2">20%</option>
<option value="0.3">30%</option>
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
<option value="100">100</option>
<option value="200">200</option>
<option value="300">300</option>
<option value="400">400</option>
<option value="500">500</option>
</select>
"""
fulldirs_tmpl = """
<input type="checkbox" name="fulldirs" value="1"
${fulldir_checked}/>
"""
mode_tmpl = """
<select name="mode">
<option value="stats">stats</option>
<option value="callees">callees</option>
<option value="callers">callers</option>
</select>
"""
nfl_filter_tmpl = """
<input type="text" name="nfl_filter" value="${nfl_filter}"
placeholder="filename part" />
"""
formelements_tmpl = """
<div>
<table>
<tr>
<td>
<strong>Profile</strong>
<td>
<strong>Sort</strong>
</td>
<td>
<strong>Limit</strong>
</td>
<td>
<strong>Full Path</strong>
</td>
<td>
<strong>Filter</strong>
</td>
<td>
</td>
<td>
<strong>Plot Metric</strong>
</td>
<td>
<strong>Plot Type</strong>
<td>
</td>
<td>
<strong>Format</strong>
</td>
<td>
<td>
</td>
<td>
</td>
</tr>
<tr>
<td>
${profile}
<td>
${sort}
</td>
<td>
${limit}
</td>
<td>
${fulldirs}
</td>
<td>
${nfl_filter}
</td>
<td>
<input type="submit" name="query" value="query"/>
</td>
<td>
<select name='metric'>
<option value='nc'>call count</option>
<option value='cc'>primitive call count</option>
<option value='tt'>total time</option>
<option value='ct'>cumulative time</option>
</select>
</td>
<td>
<select name='plottype'>
<option value='bar'>bar</option>
<option value='pie'>pie</option>
</select>
<td>
<input type="submit" name="plot" value="plot"/>
</td>
<td>
<select name='format'>
<option value='default'>binary</option>
<option value='json'>json</option>
<option value='csv'>csv</option>
<option value='ods'>ODF.ods</option>
</select>
</td>
<td>
<input type="submit" name="download" value="download"/>
</td>
<td>
<input type="submit" name="clear" value="clear"/>
</td>
</tr>
</table>
</div>
"""
index_tmpl = """
<html>
<head>
<title>profile results</title>
<style>
<!--
tr.normal { background-color: #ffffff }
tr.hover { background-color: #88eeee }
//-->
</style>
</head>
<body>
<form action="${action}" method="POST">
<div class="form-text">
${description}
</div>
<hr />
${formelements}
</form>
<pre>
${profilehtml}
</pre>
</body>
</html>
"""
class HTMLViewer(object):
format_dict = {'default': 'application/octet-stream',
'json': 'application/json',
'csv': 'text/csv',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'python': 'text/html'}
def __init__(self, app_path, profile_module, profile_log):
self.app_path = app_path
self.profile_module = profile_module
self.profile_log = profile_log
def _get_param(self, query_dict, key, default=None, multiple=False):
value = query_dict.get(key, default)
if value is None or value == '':
return default
if multiple:
return value
if isinstance(value, list):
return eval(value[0]) if isinstance(default, int) else value[0]
else:
return value
def render(self, url, method, path_entry, query_dict, clear_callback):
plot = self._get_param(query_dict, 'plot', None)
download = self._get_param(query_dict, 'download', None)
clear = self._get_param(query_dict, 'clear', None)
action = plot or download or clear
profile_id = self._get_param(query_dict, 'profile', 'current')
sort = self._get_param(query_dict, 'sort', 'time')
limit = self._get_param(query_dict, 'limit', -1)
fulldirs = self._get_param(query_dict, 'fulldirs', 0)
nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip()
metric_selected = self._get_param(query_dict, 'metric', 'cc')
plot_type = self._get_param(query_dict, 'plottype', 'bar')
download_format = self._get_param(query_dict, 'format', 'default')
content = ''
# GET /__profile, POST /__profile
if len(path_entry) == 2 and method in ['GET', 'POST']:
log_files = self.profile_log.get_logfiles(profile_id)
if action == 'plot':
content, headers = self.plot(log_files, sort, limit,
nfl_filter, metric_selected,
plot_type)
elif action == 'download':
content, headers = self.download(log_files, sort, limit,
nfl_filter, download_format)
else:
if action == 'clear':
self.profile_log.clear(profile_id)
clear_callback and clear_callback()
content, headers = self.index_page(log_files, sort, limit,
fulldirs, nfl_filter,
profile_id, url)
# GET /__profile__/all
# GET /__profile__/current
# GET /__profile__/profile_id
# GET /__profile__/profile_id/
# GET /__profile__/profile_id/account.py:50(GETorHEAD)
# GET /__profile__/profile_id/swift/proxy/controllers
# /account.py:50(GETorHEAD)
# with QUERY_STRING: ?format=[default|json|csv|ods]
elif len(path_entry) > 2 and method == 'GET':
profile_id = path_entry[2]
log_files = self.profile_log.get_logfiles(profile_id)
pids = self.profile_log.get_all_pids()
# return all profiles in a json format by default.
# GET /__profile__/
if profile_id == '':
content = '{"profile_ids": ["' + '","'.join(pids) + '"]}'
headers = [('content-type', self.format_dict['json'])]
else:
if len(path_entry) > 3 and path_entry[3] != '':
nfl_filter = '/'.join(path_entry[3:])
if path_entry[-1].find(':0') == -1:
nfl_filter = '/' + nfl_filter
content, headers = self.download(log_files, sort, -1,
nfl_filter, download_format)
headers.append(('Access-Control-Allow-Origin', '*'))
else:
raise MethodNotAllowed(_('method %s is not allowed.') % method)
return content, headers
def index_page(self, log_files=None, sort='time', limit=-1,
fulldirs=0, nfl_filter='', profile_id='current', url='#'):
headers = [('content-type', 'text/html')]
if len(log_files) == 0:
return empty_description, headers
try:
stats = Stats2(*log_files)
except (IOError, ValueError):
raise DataLoadFailure(_('Can not load profile data from %s.')
% log_files)
if not fulldirs:
stats.strip_dirs()
stats.sort_stats(sort)
nfl_filter_esc =\
nfl_filter.replace('(', '\(').replace(')', '\)')
amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit]
profile_html = self.generate_stats_html(stats, self.app_path,
profile_id, *amount)
description = "Profiling information is generated by using\
'%s' profiler." % self.profile_module
sort_repl = '<option value="%s">' % sort
sort_selected = '<option value="%s" selected>' % sort
sort = sort_tmpl.replace(sort_repl, sort_selected)
plist = ''.join(['<option value="%s">%s</option>' % (p, p)
for p in self.profile_log.get_all_pids()])
profile_element = string.Template(profile_tmpl).substitute(
{'profile_list': plist})
profile_repl = '<option value="%s">' % profile_id
profile_selected = '<option value="%s" selected>' % profile_id
profile_element = profile_element.replace(profile_repl,
profile_selected)
limit_repl = '<option value="%s">' % limit
limit_selected = '<option value="%s" selected>' % limit
limit = limit_tmpl.replace(limit_repl, limit_selected)
fulldirs_checked = 'checked' if fulldirs else ''
fulldirs_element = string.Template(fulldirs_tmpl).substitute(
{'fulldir_checked': fulldirs_checked})
nfl_filter_element = string.Template(nfl_filter_tmpl).\
substitute({'nfl_filter': nfl_filter})
form_elements = string.Template(formelements_tmpl).substitute(
{'description': description,
'action': url,
'profile': profile_element,
'sort': sort,
'limit': limit,
'fulldirs': fulldirs_element,
'nfl_filter': nfl_filter_element,
}
)
content = string.Template(index_tmpl).substitute(
{'formelements': form_elements,
'action': url,
'description': description,
'profilehtml': profile_html,
})
return content, headers
def download(self, log_files, sort='time', limit=-1, nfl_filter='',
output_format='default'):
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
nfl_esc = nfl_filter.replace('(', '\(').replace(')', '\)')
# remove the slash that is intentionally added in the URL
# to avoid failure of filtering stats data.
if nfl_esc.startswith('/'):
nfl_esc = nfl_esc[1:]
stats = Stats2(*log_files)
stats.sort_stats(sort)
if output_format == 'python':
data = self.format_source_code(nfl_filter)
elif output_format == 'json':
data = stats.to_json(nfl_esc, limit)
elif output_format == 'csv':
data = stats.to_csv(nfl_esc, limit)
elif output_format == 'ods':
data = stats.to_ods(nfl_esc, limit)
else:
data = stats.print_stats()
return data, [('content-type', self.format_dict[output_format])]
except ODFLIBNotInstalled as ex:
raise ex
except Exception as ex:
raise ProfileException(_('Data download error: %s') % ex)
def plot(self, log_files, sort='time', limit=10, nfl_filter='',
metric_selected='cc', plot_type='bar'):
if not PLOTLIB_INSTALLED:
raise PLOTLIBNotInstalled(_('python-matplotlib not installed.'))
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
stats = Stats2(*log_files)
stats.sort_stats(sort)
stats_dict = stats.stats
__, func_list = stats.get_print_list([nfl_filter, limit])
nfls = []
performance = []
names = {'nc': 'Total Call Count', 'cc': 'Primitive Call Count',
'tt': 'Total Time', 'ct': 'Cumulative Time'}
for func in func_list:
cc, nc, tt, ct, __ = stats_dict[func]
metric = {'cc': cc, 'nc': nc, 'tt': tt, 'ct': ct}
nfls.append(func[2])
performance.append(metric[metric_selected])
y_pos = range(len(nfls))
error = [random.random() for __ in y_pos]
plt.clf()
if plot_type == 'pie':
plt.pie(x=performance, explode=None, labels=nfls,
autopct='%1.1f%%')
else:
plt.barh(y_pos, performance, xerr=error, align='center',
alpha=0.4)
plt.yticks(y_pos, nfls)
plt.xlabel(names[metric_selected])
plt.title('Profile Statistics (by %s)' % names[metric_selected])
#plt.gcf().tight_layout(pad=1.2)
with tempfile.TemporaryFile() as profile_img:
plt.savefig(profile_img, format='png', dpi=300)
profile_img.seek(0)
data = profile_img.read()
            return data, [('content-type', 'image/png')]
except Exception as ex:
raise ProfileException(_('plotting results failed due to %s') % ex)
def format_source_code(self, nfl):
nfls = re.split('[:()]', nfl)
file_path = nfls[0]
try:
lineno = int(nfls[1])
except (TypeError, ValueError, IndexError):
lineno = 0
        # for security reasons, this needs to be fixed.
        if not file_path.endswith('.py'):
            return _('This file type is forbidden to access!')
try:
data = []
i = 0
with open(file_path) as f:
lines = f.readlines()
max_width = str(len(str(len(lines))))
fmt = '<span id="L%d" rel="#L%d">%' + max_width\
+ 'd|<code>%s</code></span>'
for line in lines:
l = cgi.escape(line, quote=None)
i = i + 1
if i == lineno:
fmt2 = '<span id="L%d" style="background-color: \
rgb(127,255,127)">%' + max_width +\
'd|<code>%s</code></span>'
data.append(fmt2 % (i, i, l))
else:
data.append(fmt % (i, i, i, l))
data = ''.join(data)
except Exception:
return _('Can not access the file %s.') % file_path
return '<pre>%s</pre>' % data
def generate_stats_html(self, stats, app_path, profile_id, *selection):
html = []
for filename in stats.files:
html.append('<p>%s</p>' % filename)
try:
for func in stats.top_level:
html.append('<p>%s</p>' % func[2])
html.append('%s function calls' % stats.total_calls)
if stats.total_calls != stats.prim_calls:
html.append("(%d primitive calls)" % stats.prim_calls)
html.append('in %.3f seconds' % stats.total_tt)
if stats.fcn_list:
stat_list = stats.fcn_list[:]
msg = "<p>Ordered by: %s</p>" % stats.sort_type
else:
stat_list = stats.stats.keys()
msg = '<p>Random listing order was used</p>'
for sel in selection:
stat_list, msg = stats.eval_print_amount(sel, stat_list, msg)
html.append(msg)
html.append('<table style="border-width: 1px">')
if stat_list:
html.append('<tr><th>#</th><th>Call Count</th>\
<th>Total Time</th><th>Time/Call</th>\
<th>Cumulative Time</th>\
<th>Cumulative Time/Call</th>\
<th>Filename:Lineno(Function)</th>\
<th>JSON</th>\
</tr>')
count = 0
for func in stat_list:
count = count + 1
html.append('<tr onMouseOver="this.className=\'hover\'"\
onMouseOut="this.className=\'normal\'">\
<td>%d)</td>' % count)
cc, nc, tt, ct, __ = stats.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
html.append('<td>%s</td>' % c)
html.append('<td>%f</td>' % tt)
if nc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(tt) / nc))
html.append('<td>%f</td>' % ct)
if cc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(ct) / cc))
nfls = cgi.escape(stats.func_std_string(func))
if nfls.split(':')[0] not in ['', 'profile'] and\
os.path.isfile(nfls.split(':')[0]):
html.append('<td><a href="%s/%s%s?format=python#L%d">\
%s</a></td>' % (app_path, profile_id,
nfls, func[1], nfls))
else:
html.append('<td>%s</td>' % nfls)
if not nfls.startswith('/'):
nfls = '/' + nfls
html.append('<td><a href="%s/%s%s?format=json">\
--></a></td></tr>' % (app_path,
profile_id, nfls))
except Exception as ex:
html.append("Exception:" % ex.message)
return ''.join(html)
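# --- Illustration (not part of the original middleware) ------------------------
# A hedged sketch of driving HTMLViewer.render() directly; the URL, path split
# and query values are invented for the example and normally come from the
# profiling middleware that owns the viewer instance.
def _example_render_call(viewer):
    # corresponds to: GET /__profile__/all?format=json
    path_entry = ['', '__profile__', 'all']
    query_dict = {'format': ['json']}
    return viewer.render('http://localhost:8080/__profile__', 'GET',
                         path_entry, query_dict, clear_callback=None)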
| apache-2.0 |
mxjl620/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
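# --- Illustration (not part of the original test module) -----------------------
# A hedged sketch of what NearestCentroid learns on the toy sample above: with
# the default Euclidean metric the fitted centroids are simply the class means,
# roughly [-4/3, -4/3] for class -1 and [4/3, 4/3] for class +1.
def _toy_centroids():
    clf = NearestCentroid().fit(X, y)
    return clf.centroids_   # approximately [[-1.33, -1.33], [1.33, 1.33]]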
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
bdweave/repetitive_element_parser | setup.py | 1 | 4107 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Repetitive Element Parser',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='This package defines a repetitive element parser to be used with \
rna-seq datasets',
long_description=long_description,
# The project's main homepage.
url='https://github.com/pypa/sampleproject',
# Author details
author='Bradley Don Weaver',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Biologists/Geneticists',
'Topic :: Genomic Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='genomics repetitive-element expression analysis',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
    install_requires=['numpy', 'pandas', 'sqlalchemy', 'ipywidgets'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
            'repetitive_element_parser=repetitive_element_parser:main',
],
},
) | mit |
EricSB/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
    if adjusted=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
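# --- Illustration (not part of the original module) ----------------------------
# A hedged, self-contained sketch of the CSV layout parse_yahoo_historical()
# expects: a header row followed by Date,Open,High,Low,Close,Volume,Adj Close,
# newest row first.  The prices below are invented.
def _demo_parse_yahoo_historical():
    from StringIO import StringIO
    sample = ("Date,Open,High,Low,Close,Volume,Adj Close\n"
              "2008-01-03,10.20,10.60,10.10,10.50,120000,10.45\n"
              "2008-01-02,10.00,10.40,9.90,10.30,100000,10.25\n")
    # returns [(d, open, close, high, low, volume), ...], oldest row first
    return parse_yahoo_historical(StringIO(sample), asobject=False, adjusted=True)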
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
        warnings.warn('urlopen() failure for %s\n%s' % (ticker, exc))
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
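# --- Illustration (not part of the original module) ----------------------------
# A minimal, hedged usage sketch of candlestick() above; the quotes are invented
# and dates are converted to matplotlib float days with date2num.
def _demo_candlestick():
    import matplotlib.pyplot as plt
    quotes = [
        (date2num(datetime.date(2008, 1, 2)), 10.0, 10.5, 10.8,  9.9, 100000),
        (date2num(datetime.date(2008, 1, 3)), 10.5, 10.2, 10.6, 10.1, 120000),
        (date2num(datetime.date(2008, 1, 4)), 10.2, 10.4, 10.7, 10.0,  90000),
    ]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    candlestick(ax, quotes, width=0.6, colorup='g', colordown='r')
    ax.xaxis_date()
    return fig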
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| agpl-3.0 |
himkt/tasks | nlp/baseline.py | 1 | 5955 | import re
import os
import csv
import codecs
import pickle
import argparse
import sklearn.feature_extraction.text
import scipy.spatial.distance
# import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
parser = argparse.ArgumentParser()
parser.add_argument('--remove-stopword', action='store_true',
dest='REMOVE_STOPWORD', default=False)
parser.add_argument('--stemming', action='store_true',
dest='STEMMING', default=False)
args = parser.parse_args()
TRAIN_DATA_FILE = 'input/train.csv'
TEST_DATA_FILE = 'input/test.csv'
REMOVE_STOPWORD = args.REMOVE_STOPWORD
STEMMING = args.STEMMING
########################################
# process texts in datasets
########################################
print('Processing text dataset')
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
# def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
def text_to_wordlist(text, remove_stopwords, stem_words):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally, remove stop words
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if w not in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
return text
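# --- Illustration (not part of the original script) -----------------------------
# A hedged sketch of calling text_to_wordlist() directly; the sample question is
# invented and the exact output depends on the installed NLTK stopword list and
# SnowballStemmer version.
def _demo_clean(sample="What is the best way to learn natural language processing?"):
    cleaned = text_to_wordlist(sample, remove_stopwords=True, stem_words=True)
    # e.g. roughly "best way learn natur languag process" (approximate)
    return cleaned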
def preprop_train(input_fpath, remove_stopwords=True, stem_words=True):
stop = '1' if remove_stopwords else '0'
stem = '1' if stem_words else '0'
fpath = f'work/{stop}_{stem}_train.pickle'
if os.path.isfile(fpath):
print('cache hit...')
texts_1, texts_2, labels = pickle.load(open(fpath, 'rb'))
return texts_1, texts_2, labels
texts_1, texts_2, labels = ([], [], [])
with codecs.open(input_fpath, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
next(reader) # remove header
for values in reader:
texts_1.append(text_to_wordlist(values[3],
remove_stopwords, stem_words))
texts_2.append(text_to_wordlist(values[4],
remove_stopwords, stem_words))
labels.append(int(values[5]))
pickle.dump((texts_1, texts_2, labels), open(fpath, 'wb'))
return texts_1, texts_2, labels
def preprop_test(input_fpath, remove_stopwords=True, stem_words=True):
stop = '1' if remove_stopwords else '0'
stem = '1' if stem_words else '0'
fpath = f'work/{stop}_{stem}_test.pickle'
if os.path.isfile(fpath):
print('cache hit...')
test_texts_1, test_texts_2, test_ids = pickle.load(open(fpath, 'rb'))
return test_texts_1, test_texts_2, test_ids
test_texts_1, test_texts_2, test_ids = ([], [], [])
with codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
next(reader) # remove header
for values in reader:
test_texts_1.append(text_to_wordlist(values[1],
remove_stopwords, stem_words))
test_texts_2.append(text_to_wordlist(values[2],
remove_stopwords, stem_words))
test_ids.append(values[0])
pickle.dump((test_texts_1, test_texts_2, test_ids), open(fpath, 'wb'))
return test_texts_1, test_texts_2, test_ids
# ------------
# model
print(f'remove_stopwords: {REMOVE_STOPWORD}')
print(f'stem_words: {STEMMING}')
texts_1, texts_2, labels = preprop_train(TRAIN_DATA_FILE,
REMOVE_STOPWORD, STEMMING)
print('Found %s texts in train.csv' % len(texts_1))
test_texts_1, test_texts_2, test_ids = preprop_test(TEST_DATA_FILE,
REMOVE_STOPWORD, STEMMING)
print('Found %s texts in test.csv' % len(test_texts_1))
vectorizer = sklearn.feature_extraction.text.TfidfVectorizer()
vectorizer.fit(texts_1 + texts_2 + test_texts_1 + test_texts_2)
test_1_vec_list = vectorizer.transform(test_texts_1)
test_2_vec_list = vectorizer.transform(test_texts_2)
print('is_duplicate,test_id')
for test_id, test_1_vec, test_2_vec in zip(test_ids,
test_1_vec_list,
test_2_vec_list):
sim = 1 - scipy.spatial.distance.cosine(test_1_vec.todense(),
test_2_vec.todense())
print(f'{sim},{test_id}')
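# --- Illustration (not part of the original script) -----------------------------
# Hedged restatement of the score printed above: for TF-IDF vectors q1 and q2,
# scipy's cosine() returns 1 - (q1 . q2) / (||q1|| * ||q2||), so `sim` is the
# plain cosine similarity; the helper name below is invented.
def _cosine_sim(q1, q2):
    import numpy as np
    q1 = np.asarray(q1, dtype=float).ravel()
    q2 = np.asarray(q2, dtype=float).ravel()
    denom = np.linalg.norm(q1) * np.linalg.norm(q2)
    return float(np.dot(q1, q2) / denom) if denom else 0.0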
| mit |
smartscheduling/scikit-learn-categorical-tree | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
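# --- Illustration (not part of the original test module) -----------------------
# A hedged sketch of the scorer contract exercised throughout these tests: a
# scorer is any callable with signature (estimator, X, y) -> float, and
# make_scorer wraps a plain metric into that shape.  The helper name is invented.
def _demo_custom_scorer():
    X, y = make_classification(random_state=0)
    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    fbeta2 = make_scorer(fbeta_score, beta=2)   # same pattern as the tests below
    return fbeta2(clf, X, y)                    # a single float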
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
ahoyosid/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest varies as the number of candidates and the
number of estimators (trees) change.
In the first plot, accuracy is measured as the number of candidates varies.
Here, the term "number of candidates" refers to the maximum bound on the
number of distinct points retrieved from each tree to calculate the
distances. Nearest neighbors are selected from this pool of candidates. The
number of estimators is kept at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50, the number of
trees is varied, and the accuracy is plotted against those values. To measure
the accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
Aegeaner/spark | python/pyspark/sql/tests/test_pandas_udf_window.py | 7 | 12906 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_count_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('long', PandasUDFType.GROUPED_AGG)
def count(v):
return len(v)
return count
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
@property
def sliding_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)
@property
def sliding_range_window(self):
return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)
@property
def growing_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)
@property
def growing_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(Window.unboundedPreceding, 4)
@property
def shrinking_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)
@property
def shrinking_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(-3, Window.unboundedFollowing)
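    # Added illustration (not part of the original test suite), assuming a
    # partition of six rows ordered by 'v' with indices 0..5:
    #   sliding_row_window   at row 3 covers rows 1..4  (rowsBetween(-2, 1))
    #   growing_row_window   at row 1 covers rows 0..4  (start fixed, end +3)
    #   shrinking_row_window at row 3 covers rows 1..5  (start -2, end fixed)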
def test_simple(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
w = self.unbounded_window
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
def test_bounded_simple(self):
from pyspark.sql.functions import mean, max, min, count
df = self.data
w1 = self.sliding_row_window
w2 = self.shrinking_range_window
plus_one = self.python_plus_one
count_udf = self.pandas_agg_count_udf
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count_udf(df['v']).over(w2)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('min_v', min_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count(df['v']).over(w2)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('min_v', min(df['v']).over(w1))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_growing_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.growing_row_window
w2 = self.growing_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_sliding_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.sliding_row_window
w2 = self.sliding_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_shrinking_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.shrinking_row_window
w2 = self.shrinking_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_bounded_mixed(self):
from pyspark.sql.functions import mean, max
df = self.data
w1 = self.sliding_row_window
w2 = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean(df['v']).over(w1))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_window import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/scatter_demo2.py | 6 | 1057 | """
make a scatter plot with varying color and size arguments
"""
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
r = r[-250:] # get the most recent 250 trading days
delta1 = np.diff(r.adj_close)/r.adj_close[:-1]
# size in points ^2
volume = (15*r.volume[:-2]/r.volume[0])**2
close = 0.003 * r.close[:-2] / (0.003 * r.open[:-2])  # close/open ratio used for the colour scale
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.75)
#ticks = arange(-0.06, 0.061, 0.02)
#xticks(ticks)
#yticks(ticks)
ax.set_xlabel(r'$\Delta_i$', fontsize=20)
ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=20)
ax.set_title('Volume and percent change')
ax.grid(True)
plt.show()
| mit |
bblais/Classy | classy/image.py | 1 | 18262 | from PIL import Image
from .Struct import Struct
import os
import glob
import numpy as np
import classy.datasets
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def split(images,test_size=0.2,verbose=True,shuffle=True):
from numpy import where,array
import numpy.random
d1=Struct(split=True)
d2=Struct(split=True)
skip_names=['files','data','targets']
for name in images:
if name in skip_names:
            continue
d1[name]=images[name]
d2[name]=images[name]
num_targets=len(images.target_names)
d1.targets=[]
d2.targets=[]
d1.data=[]
d2.data=[]
d1.files=[]
d2.files=[]
for k in range(num_targets):
idx=where(images.targets==k)[0]
N=len(idx)
if test_size<1: # fraction
N_test=int(test_size*N)+1
else:
N_test=test_size
N_train=N-N_test
for i in idx[:N_test]:
d1.targets.append(images.targets[i])
d1.files.append(images.files[i])
d1.data.append(images.data[i])
for i in idx[N_test:]:
d2.targets.append(images.targets[i])
d2.files.append(images.files[i])
d2.data.append(images.data[i])
d1.targets=array(d1.targets,dtype=np.int32)
d2.targets=array(d2.targets,dtype=np.int32)
if shuffle:
idx=np.array(range(len(d1.targets)))
np.random.shuffle(idx)
d1.targets=d1.targets[idx]
d1.files=[d1.files[i] for i in idx]
d1.data=[d1.data[i] for i in idx]
idx=np.array(range(len(d2.targets)))
np.random.shuffle(idx)
d2.targets=d2.targets[idx]
d2.files=[d2.files[i] for i in idx]
d2.data=[d2.data[i] for i in idx]
if verbose:
print("Files in Test Set:")
print("\t",','.join(d1.files))
print("Files in Train Set:")
print("\t",','.join(d2.files))
return d2,d1
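# Illustrative usage sketch added for documentation; it is never called by the
# library.  It assumes a directory laid out the way load_images (defined
# below) expects, i.e. one sub-folder per class; the path is hypothetical.
def _split_usage_sketch(dirname='images/'):
    images = load_images(dirname, verbose=False)
    train, test = split(images, test_size=0.2, verbose=False)
    # every image ends up in exactly one of the two subsets
    assert len(train.files) + len(test.files) == len(images.files)
    return train, test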
def show_images(images,which_images=None,max_images=None):
from pylab import imshow,subplot,sqrt,ceil,title,cm,gca
from random import shuffle
if which_images is None:
which_images=list(range(len(images.data)))
if isinstance(which_images[0],str): # target names
which_names=which_images
which_images=[]
for idx in range(len(images.data)):
name=images.target_names[images.targets[idx]]
if name in which_names:
which_images.append(idx)
if not max_images is None:
shuffle(which_images)
which_images=which_images[:max_images]
if not which_images:
raise ValueError("No images selected")
L=len(which_images)
c=ceil(sqrt(L))
r=ceil(L/c)
for i,idx in enumerate(which_images):
im=images.data[idx]
name=images.target_names[images.targets[idx]]
subplot(r,c,i+1)
imshow(im,interpolation='nearest',cmap=cm.gray)
title(name)
if i<(L-c):
gca().set_xticklabels([])
if i%c!=0:
gca().set_yticklabels([])
def show_image_vector(vector,shape):
from matplotlib.pyplot import imshow
from matplotlib.cm import gray
im=vector.reshape(shape)
imshow(im,interpolation='nearest',cmap=gray)
def vector_to_image(vector,shape,fname=None):
from PIL import Image
from matplotlib.pyplot import imshow
from matplotlib.cm import gray
arr=vector.reshape(shape)
if fname is None:
imshow(arr,interpolation='nearest',cmap=gray)
else:
if arr.max()>255:
arr=np.array(arr,dtype=np.uint16)
elif arr.max()>1:
arr=np.array(arr,dtype=np.uint8)
else:
arr=np.array(arr*2**16,dtype=np.uint16)
im=Image.fromarray(arr)
im.save(fname)
def array_to_image_struct(arr):
if isinstance(arr,list):
N=len(arr)
data=Struct()
data.DESCR="Images"
data.files=[None]*N
data.data=arr
data.targets=[0]*N
data.target_names=['None']*N
else:
data=Struct()
data.DESCR="Images"
data.files=[None]
data.data=[arr]
data.targets=[0]
data.target_names=['None']
return data
def load_images_from_filepatterns(delete_alpha=False,**kwargs):
from glob import glob
data=Struct()
data.DESCR="Images"
data.files=[]
data.data=[]
data.targets=[]
data.target_names=[]
filenames={}
verbose=None
for key in sorted(kwargs):
if key=='verbose':
verbose=kwargs[key]
continue
if isinstance(kwargs[key],str):
fnames=glob(kwargs[key])
else:
fnames=kwargs[key]
if not fnames:
continue
data.target_names.append(key)
filenames[key]=fnames
if verbose is None:
verbose=True
if not data.target_names:
print("No images matching the patterns found.")
return None
for i,name in enumerate(data.target_names):
values=filenames[name]
if verbose:
print("[%s]: %d files found" % (name,len(values)))
for v in values:
print("\t%s" % v)
data.files.extend(values)
data.targets.extend([i]*len(values))
data.targets=np.array(data.targets,dtype=np.int32)
for fname in data.files:
im=Image.open(fname)
if im.mode=='1' or im.mode=='LA':
im=im.convert('L')
ima=np.asarray(im)
if delete_alpha and len(ima.shape)==3:
ima=ima[:,:,:3] # take out the alpha channel if it exists
data.data.append(ima)
return data
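# Illustrative sketch (added, not part of the original module): each keyword
# becomes a class name and its value is a glob pattern (or an explicit list
# of file names).  The patterns below are hypothetical.
def _load_from_filepatterns_sketch():
    data = load_images_from_filepatterns(cats='pets/cat*.png',
                                         dogs='pets/dog*.png',
                                         verbose=False)
    # data.target_names lists the keywords whose patterns matched any files
    return data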
def process_images(filter,newdirname='.',resize=None,colormode=None,ext=None):
# filter='caltech101/*/*.jpg'
# newdirname='blah'
# resize=(300,200)
# colormode='color'
# ext='.png'
mode={'color':'RGB','gray':'L','bw':'1'}
revmode={'RGB':'color','L':'gray','1':'bw'}
files=glob.glob(filter)
files=[_ for _ in files if 'desktop.ini' not in _]
im2=None
for fname in files:
if os.path.isdir(fname):
continue
try:
im=Image.open(fname)
if im.mode=='LA':
im=im.convert('L')
if im.mode=='RGBA':
im=im.convert('RGB')
        except IOError:
            print("failed to open %s" % fname)
            continue
im_orig=im
if not resize is None:
im=im.resize(resize)
if not colormode is None:
im=im.convert(mode[colormode])
if im is im_orig:
print("%s: size %s mode %s" % (fname,im.size,revmode.get(im.mode,im.mode)))
else:
newfname=os.path.join(newdirname,fname)
if not ext is None:
ext=ext.replace(".","")
ext="."+ext
newfname,oldext=os.path.splitext(newfname)
newfname=newfname+ext
print("%s: size %s mode %s -> %s: size %s mode %s" % (fname,
im_orig.size,
revmode.get(im_orig.mode,im_orig.mode),
newfname,
im.size,
revmode.get(im.mode,im.mode),
))
dname,junk=os.path.split(newfname)
if not os.path.exists(dname):
os.makedirs(dname)
im.save(newfname)
def load_images(dirname,test_dirname=None,filter='*.*',delete_alpha=False,max_per_folder=None,verbose=True,make_grayscale=False):
data=Struct()
data.DESCR="Images"
data.files=[]
data.data=[]
data.targets=[]
data.target_names=[]
if not os.path.isdir(dirname): # this should be a filename, or a regex
base,fname=os.path.split(dirname)
if not base:
base='./'
dirname=base
filter=fname
else:
files=os.listdir(dirname)
for f in files:
if os.path.isdir(os.path.join(dirname,f)):
if ".ipynb_checkpoints" in f:
continue
data.target_names.append(f)
if data.target_names:
for i,name in enumerate(data.target_names):
files_filter=os.path.join(dirname,name,filter)
values=glob.glob(files_filter)
values=[_ for _ in values if 'desktop.ini' not in _]
if not max_per_folder is None:
if verbose:
print("[%s]: %d files found...%s used." % (name,len(values),max_per_folder))
values=values[:max_per_folder]
else:
if verbose:
print("[%s]: %d files found" % (name,len(values)))
data.files.extend(values)
data.targets.extend([i]*len(values))
data.targets=np.array(data.targets,dtype=np.int32)
else:
data.targets=None
name='None'
files_filter=os.path.join(dirname,filter)
values=glob.glob(files_filter)
values=[_ for _ in values if 'desktop.ini' not in _]
if not max_per_folder is None:
if verbose:
print("[%s]: %d files found...%s used." % (name,len(values),max_per_folder))
values=values[:max_per_folder]
else:
if verbose:
print("[%s]: %d files found" % (name,len(values)))
data.files.extend(values)
all_same_size=True
size=None
for fname in data.files:
im=Image.open(fname)
if im.mode=='1' or im.mode=='LA':
im=im.convert('L')
if make_grayscale:
im=im.convert('L')
ima=np.asarray(im)
if delete_alpha and len(ima.shape)==3:
ima=ima[:,:,:3] # take out the alpha channel if it exists
if size is None:
size=ima.shape
else:
if ima.shape!=size:
all_same_size=False
data.data.append(ima)
if not all_same_size:
print("Warning: not all images the same size.")
return data
def images_to_vectors(origdata,truncate=False,verbose=True):
same_shape=True
first_time=True
smallest_shape=None
for im in origdata.data:
shape=im.shape
if first_time:
smallest_shape=im.shape
first_time=False
if im.shape!=smallest_shape:
if not truncate:
raise ValueError('Not all images have the same shape')
smallest_shape=[min(x,y) for x,y in zip(im.shape,smallest_shape)]
same_shape=False
if smallest_shape is None:
raise ValueError("No images read.")
data=Struct()
data.target_names=origdata.target_names
data.targets=origdata.targets
data.files=origdata.files
data.vectors=[]
data.shape=smallest_shape
for ima in origdata.data:
if not same_shape:
if len(smallest_shape)==2:
ima=ima[:smallest_shape[0],:smallest_shape[1]]
elif len(smallest_shape)==3:
ima=ima[:smallest_shape[0],:smallest_shape[1],:smallest_shape[2]]
else:
raise ValueError(">3D shapes not supported")
vec=ima.ravel()
vec=vec.astype(np.float)
data.vectors.append(vec)
data.vectors=np.array(data.vectors)
data.feature_names=['p%d' % p for p in range(data.vectors.shape[1])]
if verbose:
classy.datasets.summary(data)
return data
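# Illustrative sketch (added, not part of the original module): images are
# loaded and then flattened into one row vector per image, ready for the
# classifiers in classy.  The directory name is hypothetical.
def _images_to_vectors_sketch(dirname='digits/'):
    images = load_images(dirname, verbose=False)
    data = images_to_vectors(images, truncate=True, verbose=False)
    # data.vectors has shape (number of images, number of pixels per image)
    return data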
def extract_patches_2d_nooverlap(ima,patch_size,max_patches=1e500):
patches=[]
pr,pc=patch_size
ir,ic=ima.shape[:2]
r=0
while (r+pr)<=ir:
c=0
while (c+pc)<=ic:
patches.append(ima[r:(r+pr),c:(c+pc),...])
c+=pc
r+=pr
patches=np.array(patches)
return patches
def reconstruct_from_patches_2d_nooverlap(patches,original_shape):
ima=np.zeros(original_shape)
patch_size=patches[0].shape
pr,pc=patch_size
ir,ic=ima.shape[:2]
count=0
r=0
while (r+pr)<=ir:
c=0
while (c+pc)<=ic:
patch=patches[count]
ima[r:(r+pr),c:(c+pc)]=patch
c+=pc
count+=1
r+=pr
return ima
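# Illustrative round-trip sketch (added, not part of the original module):
# with non-overlapping patches whose size divides the image size evenly,
# extraction followed by reconstruction recovers the original image exactly.
def _patch_roundtrip_sketch():
    ima = np.arange(36, dtype=np.float64).reshape(6, 6)
    patches = extract_patches_2d_nooverlap(ima, (3, 3))
    rebuilt = reconstruct_from_patches_2d_nooverlap(patches, ima.shape)
    assert np.allclose(ima, rebuilt)
    return rebuilt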
def images_to_patch_vectors(origdata,patch_size,max_patches=None,overlap=True,
with_transparent_patches=True,grayscale=True,
verbose=True):
from sklearn.feature_extraction.image import extract_patches_2d
data=Struct()
data.DESCR="Patches"
data.target_names=origdata.target_names
data.files=origdata.files
data.targets=[]
data.vectors=[]
data.overlap=overlap
data.with_transparent_patches=with_transparent_patches
data.grayscale=grayscale
data.shape=patch_size
data.original_shapes=[]
data.original_targets=origdata.targets
data.original_vector_number=[]
for k,ima in enumerate(origdata.data):
if not origdata.targets is None:
target=origdata.targets[k]
else:
target=None
data.original_shapes.append(ima.shape)
if overlap:
patches=extract_patches_2d(ima, patch_size, max_patches=max_patches)
else:
patches=extract_patches_2d_nooverlap(ima, patch_size, max_patches=max_patches)
for patch_num in range(patches.shape[0]):
patch=patches[patch_num,...]
if not with_transparent_patches:
if len(patch.shape)==3:
visible=patch[:,:,2]
if not visible.all():
continue
elif (patch==1.0).any():
continue
if grayscale:
try:
patch=patch[:,:,0]
except IndexError:
pass
vec=patch.ravel()
data.vectors.append(vec)
if not target is None:
data.targets.append(target)
data.original_vector_number.append(k)
data.vectors=np.array(data.vectors,dtype=np.float)
data.original_vector_number=np.array(data.original_vector_number,dtype=np.int32)
if not target is None:
data.targets=np.array(data.targets)
else:
data.targets=None
L=np.prod(patch_size)
if len(data.vectors[0])==L: # 1 channel
data.feature_names=['%d' % _i for _i in range(L)]
elif len(data.vectors[0])==3*L: # 3 channels:
data.feature_names=['r%d' % _i for _i in range(L)]+['g%d' % _i for _i in range(L)]+['b%d' % _i for _i in range(L)]
elif len(data.vectors[0])==4*L: # 4 channels:
data.feature_names=['r%d' % _i for _i in range(L)]+['g%d' % _i for _i in range(L)]+['b%d' % _i for _i in range(L)]+['a%d' % _i for _i in range(L)]
else:
data.feature_names=['%d' % _i for _i in range(len(data.vectors[0]))]
if verbose:
classy.datasets.summary(data)
return data
def patch_vectors_to_images(origdata,verbose=True):
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
data=Struct()
data.DESCR="Images"
data.target_names=origdata.target_names
data.files=origdata.files
data.targets=origdata.original_targets
data.data=[]
max_vector_number=len(data.targets)
patch_array=[]
for c in range(max_vector_number):
patches=[vec.reshape(origdata.shape)
for vec,i in zip(origdata.vectors,origdata.original_vector_number) if i==c]
patch_array=np.array(patches)
if origdata.overlap:
data.data.append(reconstruct_from_patches_2d(patch_array,origdata.original_shapes[c]))
else:
data.data.append(reconstruct_from_patches_2d_nooverlap(patch_array,origdata.original_shapes[c]))
if verbose:
classy.datasets.summary(data)
return data
def images_to_random_pixel_vectors(origdata,number_of_pixels,maximum_points=None,verbose=True):
from random import shuffle
sz=number_of_pixels
data=Struct()
data.DESCR="Pixel Vectors"
data.target_names=origdata.target_names
data.files=origdata.files
data.targets=[]
data.vectors=[]
data.original_targets=origdata.targets
data.original_vector_number=[]
for k,im in enumerate(origdata.data):
if not origdata.targets is None:
target=origdata.targets[k]
else:
target=None
mx=im.max()
try:
grayim=im[:,:,0]
visible=im[:,:,3]
X=grayim[visible>0]
except IndexError: # not transparent
grayim=im
X=grayim[(grayim<mx) & (grayim>0)]
if not maximum_points is None:
X=X[:maximum_points]
shuffle(X)
L=len(X)
for i in range(L//sz):
vec=X[(i*sz):((i+1)*sz)].ravel()
data.vectors.append(vec)
if not target is None:
data.targets.append(target)
data.original_vector_number.append(k)
data.feature_names=list(range(sz))
data.vectors=np.array(data.vectors)
if not target is None:
data.targets=np.array(data.targets)
else:
data.targets=None
data.original_vector_number=np.array(data.original_vector_number,dtype=np.int32)
if verbose:
classy.datasets.summary(data)
return data
| mit |
altairpearl/scikit-learn | sklearn/ensemble/partial_dependence.py | 25 | 15121 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
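# Illustrative sketch (added for clarity; not part of scikit-learn itself):
# a made-up two-feature array yields a 10 x 10 cartesian grid spanning the
# requested percentiles of each column.
def _grid_from_X_sketch():
    X_demo = np.random.RandomState(0).rand(50, 2)
    grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95),
                              grid_resolution=10)
    # one grid row per point of the cartesian product of the per-feature axes
    assert grid.shape == (10 * 10, 2)
    return grid, axes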
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
        For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
charanpald/tyre-hug | tyrehug/exp/svdbenchmark.py | 1 | 4877 | import time
import numpy
import sppy
import sppy.linalg
import matplotlib.pyplot as plt
import scipy.sparse
import os
from scipy.sparse.linalg import svds
from pypropack import svdp
from sparsesvd import sparsesvd
from sklearn.decomposition import TruncatedSVD
from sppy.linalg import GeneralLinearOperator
def time_reps(func, params, reps):
start = time.time()
for s in range(reps):
func(*params)
print("Executed " + str(func))
return (time.time() - start) / reps
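# Illustrative helper (added for documentation; never called by the
# benchmark): averages the wall-clock time of a dense SVD on a small random
# matrix.  The 200 x 200 size is made up purely for illustration.
def _time_reps_example(reps=3):
    A_demo = numpy.random.rand(200, 200)
    return time_reps(numpy.linalg.svd, (A_demo,), reps)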
"""
Compare and contrast different methods of computing the SVD
"""
# Fix issue with CPU affinity
os.system("taskset -p 0xff %d" % os.getpid())
k = 50
p = 0
reps = 2
density = 10**-3
n_iter = 5
truncated_svd = TruncatedSVD(k, n_iter=5)
var_range = numpy.arange(1, 11)
def time_densities():
n = 10**4
densities = var_range * 2 * 10**-3
times = numpy.zeros((5, densities.shape[0]))
for i, density in enumerate(densities):
# Generate random sparse matrix
inds = numpy.random.randint(n, size=(2, n * n * density))
data = numpy.random.rand(n * n * density)
A = scipy.sparse.csc_matrix((data, inds), (n, n))
A_sppy = sppy.csarray(A, storagetype="row")
L = GeneralLinearOperator.asLinearOperator(A_sppy, parallel=True)
print(A.shape, A.nnz)
times[0, i] = time_reps(svds, (A, k), reps)
times[1, i] = time_reps(svdp, (A, k), reps)
# Remove SparseSVD since it is significantly slower than the other methods
# times[2, i] = time_reps(sparsesvd, (A, k), reps)
times[3, i] = time_reps(truncated_svd.fit, (A,), reps)
times[4, i] = time_reps(sppy.linalg.rsvd, (L, k, p, n_iter), reps)
print(n, density, times[:, i])
plt.figure(0)
plt.plot(densities, times[0, :], 'k-', label="ARPACK")
plt.plot(densities, times[1, :], 'r-', label="PROPACK")
# plt.plot(densities, times[2, :], 'b-', label="SparseSVD")
plt.plot(densities, times[3, :], 'k--', label="sklearn RSVD")
plt.plot(densities, times[4, :], 'r--', label="sppy RSVD")
plt.legend(loc="upper left")
plt.xlabel("density")
plt.ylabel("time (s)")
plt.savefig("time_densities.png", format="png")
# Next, vary the matrix size
def time_ns():
density = 10**-3
ns = var_range * 10**4
times = numpy.zeros((5, ns.shape[0]))
for i, n in enumerate(ns):
# Generate random sparse matrix
inds = numpy.random.randint(n, size=(2, n * n * density))
data = numpy.random.rand(n * n * density)
A = scipy.sparse.csc_matrix((data, inds), (n, n))
A_sppy = sppy.csarray(A, storagetype="row")
L = GeneralLinearOperator.asLinearOperator(A_sppy, parallel=True)
print(A.shape, A.nnz)
times[0, i] = time_reps(svds, (A, k), reps)
times[1, i] = time_reps(svdp, (A, k), reps)
# times[2, i] = time_reps(sparsesvd, (A, k), reps)
times[3, i] = time_reps(truncated_svd.fit, (A,), reps)
times[4, i] = time_reps(sppy.linalg.rsvd, (L, k, p, n_iter), reps)
print(n, density, times[:, i])
plt.figure(1)
plt.plot(ns, times[0, :], 'k-', label="ARPACK")
plt.plot(ns, times[1, :], 'r-', label="PROPACK")
# plt.plot(ns, times[2, :], 'b-', label="SparseSVD")
plt.plot(ns, times[3, :], 'k--', label="sklearn RSVD")
plt.plot(ns, times[4, :], 'r--', label="sppy RSVD")
plt.legend(loc="upper left")
plt.xlabel("n")
plt.ylabel("time (s)")
plt.savefig("time_ns.png", format="png")
def time_ks():
n = 10**4
density = 10**-3
ks = var_range * 20
times = numpy.zeros((5, ks.shape[0]))
for i, k in enumerate(ks):
# Generate random sparse matrix
inds = numpy.random.randint(n, size=(2, n * n * density))
data = numpy.random.rand(n * n * density)
A = scipy.sparse.csc_matrix((data, inds), (n, n))
A_sppy = sppy.csarray(A, storagetype="row")
L = GeneralLinearOperator.asLinearOperator(A_sppy, parallel=True)
print(A.shape, A.nnz)
times[0, i] = time_reps(svds, (A, k), reps)
times[1, i] = time_reps(svdp, (A, k), reps)
# times[2, i] = time_reps(sparsesvd, (A, k), reps)
truncated_svd = TruncatedSVD(k, n_iter=5)
times[3, i] = time_reps(truncated_svd.fit, (A,), reps)
times[4, i] = time_reps(sppy.linalg.rsvd, (L, k, p, n_iter), reps)
print(n, density, times[:, i])
plt.figure(2)
plt.plot(ks, times[0, :], 'k-', label="ARPACK")
plt.plot(ks, times[1, :], 'r-', label="PROPACK")
# plt.plot(ks, times[2, :], 'b-', label="SparseSVD")
plt.plot(ks, times[3, :], 'k--', label="sklearn RSVD")
plt.plot(ks, times[4, :], 'r--', label="sppy RSVD")
plt.legend(loc="upper left")
plt.xlabel("k")
plt.ylabel("time (s)")
plt.savefig("time_ks.png", format="png")
time_densities()
time_ks()
time_ns()
plt.show()
| mit |
mwmuni/LIGGGHTS_GUI | trimesh/path/polygons.py | 1 | 12464 | import numpy as np
import networkx as nx
from shapely.geometry import Polygon, Point, LineString
from rtree import Rtree
from collections import deque
from .. import bounds
from ..geometry import medial_axis as _medial_axis
from ..constants import tol_path as tol
from ..constants import log
from ..transformations import transform_points, planar_matrix
from ..util import is_sequence
from .traversal import resample_path
def polygons_enclosure_tree(polygons):
'''
    Given a list of shapely polygons, find which are the root (aka outermost)
    polygons, and which represent holes that penetrate the root curve.
    We do this by creating an R-tree for rough collision detection,
    and then do polygon queries for a final result.
'''
tree = Rtree()
for i, polygon in enumerate(polygons):
tree.insert(i, polygon.bounds)
count = len(polygons)
g = nx.DiGraph()
g.add_nodes_from(np.arange(count))
for i in range(count):
if polygons[i] is None:
continue
# we first query for bounding box intersections from the R-tree
for j in tree.intersection(polygons[i].bounds):
if (i == j):
continue
# we then do a more accurate polygon in polygon test to generate
# the enclosure tree information
if polygons[i].contains(polygons[j]):
g.add_edge(i, j)
elif polygons[j].contains(polygons[i]):
g.add_edge(j, i)
roots = [n for n, deg in list(g.in_degree().items()) if deg == 0]
return roots, g
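# Illustrative sketch (added, not part of trimesh): a large square with a
# smaller square inside it yields one root (index 0) and a containment
# edge 0 -> 1 in the returned graph.
def _enclosure_tree_sketch():
    outer = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
    inner = Polygon([(4, 4), (6, 4), (6, 6), (4, 6)])
    roots, g = polygons_enclosure_tree([outer, inner])
    assert roots == [0]
    assert g.has_edge(0, 1)
    return roots, g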
def polygons_obb(polygons):
'''
Find the OBBs for a list of shapely.geometry.Polygons
'''
rectangles = [None] * len(polygons)
transforms = [None] * len(polygons)
for i, p in enumerate(polygons):
transforms[i], rectangles[i] = polygon_obb(p)
return np.array(transforms), np.array(rectangles)
def polygon_obb(polygon):
'''
Find the oriented bounding box of a Shapely polygon.
The OBB is always aligned with an edge of the convex hull of the polygon.
Arguments
-------------
    polygon: shapely.geometry.Polygon
Returns
-------------
transform: (3,3) float, transformation matrix
which will move input polygon from its original position
to the first quadrant where the AABB is the OBB
extents: (2,) float, extents of transformed polygon
'''
points = np.asanyarray(polygon.exterior.coords)
return bounds.oriented_bounds_2D(points)
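# Illustrative sketch (added, not part of trimesh): for an axis-aligned
# 2 x 1 rectangle the oriented bounding box has the same area, so the
# product of the returned extents is 2.
def _polygon_obb_sketch():
    rect = Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])
    transform, extents = polygon_obb(rect)
    assert np.isclose(np.prod(extents), 2.0)
    return transform, extents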
def transform_polygon(polygon, transform, plot=False):
if is_sequence(polygon):
result = [transform_polygon(p, t) for p, t in zip(polygon, transform)]
else:
shell = transform_points(np.array(polygon.exterior.coords), transform)
holes = [transform_points(np.array(i.coords), transform)
for i in polygon.interiors]
result = Polygon(shell=shell, holes=holes)
if plot:
plot_polygon(result)
return result
def rasterize_polygon(polygon, pitch):
'''
    Given a shapely polygon, find its boolean raster representation on a
    regular grid with the given pixel pitch.
    Arguments
    ----------
    polygon: shapely polygon
    pitch: edge length of a pixel, in the same units as the polygon
Returns
----------
offset: (2,) float, where the origin of the raster array is located
grid: (n,m) bool, where filled areas are True
grid_points: (p,2) float, points in space
'''
bounds = np.reshape(polygon.bounds, (2, 2))
offset = bounds[0]
shape = np.ceil(np.ptp(bounds, axis=0) / pitch).astype(int)
grid = np.zeros(shape, dtype=np.bool)
def fill(ranges):
ranges = (np.array(ranges) - offset[0]) / pitch
x_index = np.array([np.floor(ranges[0]),
np.ceil(ranges[1])]).astype(int)
if np.any(x_index < 0):
return
grid[x_index[0]:x_index[1], y_index] = True
if (y_index > 0):
grid[x_index[0]:x_index[1], y_index - 1] = True
def handler_multi(geometries):
for geometry in geometries:
handlers[geometry.__class__.__name__](geometry)
def handler_line(line):
fill(line.xy[0])
def handler_null(data):
pass
handlers = {'GeometryCollection': handler_multi,
'MultiLineString': handler_multi,
'MultiPoint': handler_multi,
'LineString': handler_line,
'Point': handler_null}
x_extents = bounds[:, 0] + [-pitch, pitch]
for y_index in range(grid.shape[1]):
y = offset[1] + y_index * pitch
test = LineString(np.column_stack((x_extents, [y, y])))
hits = polygon.intersection(test)
handlers[hits.__class__.__name__](hits)
grid_points = ((np.transpose(np.nonzero(grid)).astype(np.float64) * pitch) +
offset +
(pitch / 2.0))
return offset, grid, grid_points
def plot_polygon(polygon, show=True):
import matplotlib.pyplot as plt
def plot_single(single):
plt.plot(*single.exterior.xy, color='b')
for interior in single.interiors:
plt.plot(*interior.xy, color='r')
plt.axes().set_aspect('equal', 'datalim')
if is_sequence(polygon):
[plot_single(i) for i in polygon]
else:
plot_single(polygon)
if show:
plt.show()
def plot_raster(raster, pitch, offset=[0, 0]):
'''
Plot a raster representation.
raster: (n,m) array of booleans, representing filled/empty area
pitch: the edge length of a box from raster, in cartesian space
offset: offset in cartesian space to the lower left corner of the raster grid
'''
import matplotlib.pyplot as plt
plt.axes().set_aspect('equal', 'datalim')
filled = (np.column_stack(np.nonzero(raster)) * pitch) + offset
for location in filled:
plt.gca().add_patch(plt.Rectangle(location,
pitch,
pitch,
facecolor="grey"))
def resample_boundaries(polygon, resolution, clip=None):
def resample_boundary(boundary):
# add a polygon.exterior or polygon.interior to
# the deque after resampling based on our resolution
count = boundary.length / resolution
count = int(np.clip(count, *clip))
return resample_path(boundary.coords, count=count)
if clip is None:
clip = [8, 200]
# create a sequence of [(n,2)] points
result = {'shell': resample_boundary(polygon.exterior),
'holes': deque()}
for interior in polygon.interiors:
result['holes'].append(resample_boundary(interior))
result['holes'] = np.array(result['holes'])
return result
def stack_boundaries(boundaries):
if len(boundaries['holes']) == 0:
return boundaries['shell']
result = np.vstack((boundaries['shell'],
np.vstack(boundaries['holes'])))
return result
def medial_axis(polygon, resolution=.01, clip=None):
'''
Given a shapely polygon, find the approximate medial axis based
on a voronoi diagram of evenly spaced points on the boundary of the polygon.
Arguments
----------
polygon: a shapely.geometry.Polygon
resolution: target distance between each sample on the polygon boundary
clip: [minimum number of samples, maximum number of samples]
specifying a very fine resolution can cause the sample count to
explode, so clip specifies a minimum and maximum number of samples
to use per boundary region. To not clip, this can be specified as:
[0, np.inf]
Returns
----------
lines: (n,2,2) set of line segments
'''
def contains(points):
return np.array([polygon.contains(Point(i)) for i in points])
boundary = resample_boundaries(polygon=polygon,
resolution=resolution,
clip=clip)
boundary = stack_boundaries(boundary)
return _medial_axis(samples=boundary,
contains=contains)
class InversePolygon:
'''
Create an inverse polygon.
The primary use case is that given a point inside a polygon,
you want to find the minimum distance to the boundary of the polygon.
'''
def __init__(self, polygon):
_DIST_BUFFER = .05
# create a box around the polygon
bounds = (np.array(polygon.bounds))
bounds += (_DIST_BUFFER * np.array([-1, -1, 1, 1]))
coord_ext = bounds[
np.array([2, 1, 2, 3, 0, 3, 0, 1, 2, 1])].reshape((-1, 2))
# set the interior of the box to the exterior of the polygon
coord_int = [np.array(polygon.exterior.coords)]
# a box with an exterior- shaped hole in it
exterior = Polygon(shell=coord_ext,
holes=coord_int)
# make exterior polygons out of all of the interiors
interiors = [Polygon(i.coords) for i in polygon.interiors]
# save these polygons to a flat list
self._polygons = np.append(exterior, interiors)
def distances(self, point):
'''
Find the minimum distances from a point to the exterior and interiors
Arguments
---------
point: (2) list or shapely.geometry.Point
Returns
---------
distances: (n) list of floats
'''
distances = [i.distance(Point(point)) for i in self._polygons]
return distances
def distance(self, point):
'''
Find the minimum distance from a point to the boundary of the polygon.
Arguments
---------
point: (2) list or shapely.geometry.Point
Returns
---------
distance: float
'''
distance = np.min(self.distances(point))
return distance
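# Illustrative sketch (added, not part of trimesh): for the centre of a
# 2 x 2 square the minimum distance to the boundary is 1.
def _inverse_polygon_sketch():
    square = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
    ip = InversePolygon(square)
    assert np.isclose(ip.distance([1.0, 1.0]), 1.0)
    return ip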
def polygon_hash(polygon):
'''
    An approximate hash of a shapely Polygon object.
Arguments
---------
polygon: shapely.geometry.Polygon object
Returns
---------
    hash: (5,) length list of values representing the input polygon
'''
result = [len(polygon.interiors),
polygon.convex_hull.area,
polygon.convex_hull.length,
polygon.area,
polygon.length]
return result
def random_polygon(segments=8, radius=1.0):
'''
Generate a random polygon with a maximum number of sides and approximate radius.
Arguments
---------
segments: int, the maximum number of sides the random polygon will have
radius: float, the approximate radius of the polygon desired
Returns
---------
polygon: shapely.geometry.Polygon object with random exterior, and no interiors.
'''
angles = np.sort(np.cumsum(np.random.random(
segments) * np.pi * 2) % (np.pi * 2))
radii = np.random.random(segments) * radius
points = np.column_stack(
(np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1))
points = np.vstack((points, points[0]))
polygon = Polygon(points).buffer(0.0)
if is_sequence(polygon):
return polygon[0]
return polygon
def polygon_scale(polygon):
box = np.abs(np.diff(np.reshape(polygon, (2, 2)), axis=0))
scale = box.max()
return scale
def path_to_polygon(path, scale=None):
try:
polygon = Polygon(path)
except ValueError:
return None
return repair_invalid(polygon, scale)
def repair_invalid(polygon, scale=None):
'''
Given a shapely.geometry.Polygon, attempt to return a
valid version of the polygon. If one can't be found, return None
'''
if hasattr(polygon, 'is_valid') and polygon.is_valid:
return polygon
# basic repair involves buffering the polygon outwards
# this will fix a subset of problems.
basic = polygon.buffer(tol.zero)
if basic.area < tol.zero:
return None
if basic.is_valid:
return basic
if scale is None:
scale = polygon_scale(polygon)
buffered = basic.buffer(tol.buffer * scale)
unbuffered = buffered.buffer(-tol.buffer * scale)
if unbuffered.is_valid and not is_sequence(unbuffered):
log.debug('Recovered invalid polygon through double buffering')
return unbuffered
log.warn('Unable to recover polygon! Returning None!')
return None
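def _example_repair_invalid_bowtie():
    # Illustrative usage sketch (hypothetical helper): a self-intersecting
    # "bowtie" ring is an invalid polygon; repair_invalid tries zero-width
    # buffering and, failing that, a scaled buffer/unbuffer round trip,
    # returning None if nothing works.
    bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])
    repaired = repair_invalid(bowtie)
    return repaired is None or repaired.is_valid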
| gpl-3.0 |
sergpolly/GlycoMadness | SILAC_stage3_gsites_catalog.py | 1 | 13856 | import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import ms_module as ms
import re
############################
from StringIO import StringIO
#
import argparse
#
#
# HOW TO LAUNCH THIS THING ...
# %run stage3_gsites_catalog.py --prefix ../raw_data/New_files_to_analyze/011216\ glycocapture\ 90-90 -m raw_prot_map.csv -g pulled_proteins.gb -s specs.xls
#
# do some arguments parsing to make the script looks civilized ...
parser = argparse.ArgumentParser()
parser.add_argument("-e","--exp_num",
help="specify number of an experiment",required=True)
parser.add_argument("-m","--raw_prot_map",
help="specify file name of raw data with unique fetchids of matching proteins (with/without path)",required=True)
parser.add_argument("-g","--genbank",
help="specify file name of genbank records with pulled proteins (with/without path)",required=True)
# parser.add_argument("-s","--spec_summary", help="speicfy spectrum file name (with/without path)",required=True)
parser.add_argument("-q","--quant_silac", help="speicfy quantification SILAC file name (with/without path)",required=True)
parser.add_argument("--prefix", help="specify common part of the path for peptide and spectrum files")
parser.add_argument("--separator", help="speicfy separator type in the input data",default='tab')
args = parser.parse_args()
#
###############################################
if args.prefix is not None:
raw_map_fname = os.path.join( args.prefix, args.raw_prot_map )
quant_fname = os.path.join( args.prefix, args.quant_silac )
gb_fname = os.path.join( args.prefix, args.genbank )
else:
raw_map_fname = args.raw_prot_map
quant_fname = args.quant_silac
gb_fname = args.genbank
# get the common path for later use ...
common_path = os.path.commonprefix([raw_map_fname,quant_fname,gb_fname])
common_path = os.path.dirname(common_path)
#
# Reading genbank mindfully next ...
gbrecs = ms.genebank_fix_n_read(gb_fname,key_func_type='id')
######################################
# assign some module internal stuff ...
ms.gbrecs = gbrecs
#
# separator type choice is needed only for the ORIGINAL input files ...
if args.separator == "tab":
separator = '\t'
elif args.separator == "comma":
separator = ','
else:
separator = '\t'
#################
raw_info = pd.read_csv(raw_map_fname)
quant_info = pd.read_csv(quant_fname,sep=separator)
# fix their peptide sequence thing right away ...
quant_info['pept'] = quant_info['Sequence'].str.upper()
quant_info['pept_with_mod'] = quant_info['Sequence']
raw_info['fetchid'] = raw_info['fetchid'].apply(int)
# this is an UGLY fix that we'd have to implement here just to save everything ...
if args.exp_num:
raw_info['enzyme'] = 'T'
#
# fasta = SeqIO.to_dict(SeqIO.parse(fasta_fname,"fasta"),key_function=lambda _: _.id.split('|')[1])
# 1-BASED NOTATION FOR PROTEINS INDEXING ENFORCED ...
# pep_df = pd.read_csv(uniq_pept_fname)
# connection between peptide info and spectrum info to be established ...
##########################################################################
# unroll that spec table to have 1 deamid per row ...
#
# !!!!!!!!!!!!!!!!!!!!
####################################################################################################################################################
# Deamidation extraction to be modified in the SILAC pipeline ... both Sequence and Modifications columns to be used
# Modifications are different than in the non-SILAC data: they lack the positional index,
# which now has to be inferred from the Sequence column, by looking at lower-case amino acids ...
####################################################################################################################################################
# !!!!!!!!!!!!!!!!!!!!
#
#
quant_info_unrolled = ms.unroll_by_mfunc(quant_info,['Modifications','Sequence'],(lambda row: ms.extract_deamids(row[0],row[1])),'deamid_info')
# now we'd have to determine the type of the 'Prob' column: object, float, or something else ...
# a new fix @ August 3 2016 ...
if quant_info_unrolled['Prob'].dtype == 'float':
quant_info_unrolled['pept_ident_probab'] = quant_info_unrolled['Prob']
elif quant_info_unrolled['Prob'].dtype == 'object':
quant_info_unrolled['pept_ident_probab'] = quant_info_unrolled['Prob'].str.strip('%').apply(float)
# quant_info_unrolled['prot_ident_probab'] = quant_info_unrolled['Protein identification probability'].str.strip('%').apply(float)
# quant_info_unrolled['pept_ident_probab'] = quant_info_unrolled['Peptide identification probability'].str.strip('%').apply(float)
##########################################################
# so far the following merge seems to be 100% sufficient for the desired final output ...
# we could add on extra features if needed ...
quant_n_raw = quant_info_unrolled[['pept',
'deamid_info',
'pept_with_mod',
'Weight',
'Spectrum ID',
'Mascot Ion Score',
'Mascot Identity Score',
'Mascot Delta Ion Score',
# 'prot_ident_probab',
'pept_ident_probab']].merge(raw_info,how='right',on='pept',suffixes=('','_x'))
#######################################################
# Now, extract those gsites ...
dg_func = lambda x: pd.Series( ms.deamid_to_gsite(x['deamid_info'], x['start_fetched'], str(gbrecs[x['fetchacc']].seq)) )
# and add them back to the main table ...
gs_res = quant_n_raw[['deamid_info','start_fetched','fetchacc']].apply( dg_func, axis=1 )
quant_n_raw = quant_n_raw.merge(gs_res,left_index=True,right_index=True)
print
print "Now we'd need to add theoretical glycosilation sites as a separate column ..."
print "full protein sequence and its length is added as well ..."
# this analysis must be done once for each 'fetchid', and then merged back to the main table ...
get_theor_sites_fid = lambda facc: ms.get_theor_sites(str(gbrecs[str(facc)].seq))
get_theor_sites_number_fid = lambda facc: ms.get_theor_sites_number(str(gbrecs[str(facc)].seq))
theor_sites_info = lambda facc: pd.Series(
{'fetchacc':facc,
'gsites_predicted':get_theor_sites_fid(facc),
'gsites_predicted_number':get_theor_sites_number_fid(facc),
'prot_seq':str(gbrecs[str(facc)].seq),
'prot_len':len(str(gbrecs[str(facc)].seq))} )
###################################################
predicted_gsite_info = quant_n_raw['fetchacc'].drop_duplicates().apply(theor_sites_info)
# add back to the main table ...
quant_n_raw = quant_n_raw.merge(predicted_gsite_info,on='fetchacc',how='right')
print "done ..."
print "numbering appears to be 1-based and overall correct!"
print
# print " 'gsites_predicted' column uses 1-based numbering. Enforced and checked."
# SOME FINAL PREPARATIONS TO COMPLY WITH THE REQUESTED FORMAT ...
# extract gsite AAs as separate columns ...
quant_n_raw['gsite_AA1'] = quant_n_raw['gsite_seq'].str.get(0)
quant_n_raw['gsite_AA2'] = quant_n_raw['gsite_seq'].str.get(1)
quant_n_raw['gsite_AA3'] = quant_n_raw['gsite_seq'].str.get(2)
# print "\n\n\nSHOULD BE WORKING UP UNTIL HERE ...\n\n\n"
# print "\n\n\nTOBECONTINUED ...\n\n\n"
# locus protein_name uid Protein_ID_PERCENT peptides best_peptide Peptide_probability protease Expt_NUMBER prev_aa next_aa pept_start pept_stop Location match g_site gsite_start gsites_AA1_N gsites_AA2_XbutP gsites_AA3_TS Best Mascot Ion score Best Mascot Identity score Best Mascot Delta Ion score Prot_seq signal signal_loc tm_span protein length
requested_cols = ['gsite',
'pept',
'enzyme',
'start_fetched',
'prot_name',
'fetchid',
'fetchacc',
'uid_fetched',
'GN_fetched',
'pept_ident_probab',
'gsites_predicted',
'gsites_predicted_number',
'gsite_seq',
'gstart',
'gsite_AA1',
'gsite_AA2',
'gsite_AA3',
'signal',
'signal_loc',
'tm_span']
requested_cols = ['locus',
'spec_name',
'exp_name',
'prot_name',
'uid_fetched',
# 'peptides', # THIS IS NEW STUFF ...
'pept',
'pept_with_mod',
'fetchid',
'fetchacc',
# 'best_pept', # THIS IS NEW STUFF ...
'pept_ident_probab', # BEWARE, pept ID % of the BEST PEPTIDE ...
'enzyme',
# 'experiment_num', # THIS IS NEW STUFF ...
###########################
# 'prev_aa',
# 'next_aa',
'prev_aa_fetched',
'next_aa_fetched',
# 'pept_start',
# 'pept_stop',
'start_fetched',
'stop_fetched',
###########################
'Weight',
'ch1',
'ch2',
'norm_ch1',
'norm_ch2',
'Mascot Ion Score',
'Mascot Identity Score',
'Mascot Delta Ion Score',
###########################
'gsite_seq',
'gstart',
'gsite_AA1',
'gsite_AA2',
'gsite_AA3',
'prot_seq', # PROTEIN SEQUENCE TO BE ADDED ...
'signal',
'signal_loc',
'tm_span',
'prot_len', # PROTEIN LENGTH TO BE ADDED ...
'SCORE',
'crit_pept_in',
'Spectrum ID']
#ADD FLANKING SEQUENCE ....
# ###################################################
# # TO BE CONTINUED ...
THE_MOST_FINAL_DF = quant_n_raw[requested_cols].drop_duplicates().reset_index(drop=True)
# THE_MOST_FINAL_DF = quant_n_raw.drop_duplicates(subset=requested_cols)[requested_cols].reset_index(drop=True)
# choose peptide with highest Pept_ident_probab
# Let's collapse (gsite,pept,fetchid) using the highest pept_ident_probab ...
THE_MOST_FINAL_DF_max_prob = THE_MOST_FINAL_DF.loc[THE_MOST_FINAL_DF.groupby(['gsite_seq','gstart','pept','fetchid','fetchacc','enzyme'],sort=False)['pept_ident_probab'].idxmax() ].reset_index(drop=True)
# rename pept to best_pept AND enzyme to protease ...
THE_MOST_FINAL_DF_max_prob = THE_MOST_FINAL_DF_max_prob.rename(columns={'enzyme':'protease',})
# add experiment number, something new ...
THE_MOST_FINAL_DF_max_prob['exp_num'] = int(args.exp_num)
# # location match instead of fetched/Scaffold-based results ...
THE_MOST_FINAL_DF_max_prob['spec_match'] = THE_MOST_FINAL_DF_max_prob['spec_name'] == THE_MOST_FINAL_DF_max_prob['Spectrum ID']
THE_MOST_FINAL_DF_max_prob['spec_match'] = THE_MOST_FINAL_DF_max_prob['spec_match'].map({True:'Y',False:'N'})
def get_flank(prot_seq,gstart,prot_len):
start = max(0,gstart-10)
stop = min(gstart+10,prot_len)
return prot_seq[start:stop]
THE_MOST_FINAL_DF_max_prob['pept_flank'] = THE_MOST_FINAL_DF_max_prob[['prot_seq','gstart','prot_len']].apply(lambda r: get_flank(r[0],r[1],r[2]), axis=1)
requested_cols = ['locus',
'prot_name',
'uid_fetched',
'prot_seq', # PROTEIN SEQUENCE TO BE ADDED ...
'gsite_seq',
'gstart',
'pept',
'fetchacc',
'pept_ident_probab', # BEWARE, pept ID % of the BEST PEPTIDE ...
'Mascot Ion Score',
'Mascot Identity Score',
'Mascot Delta Ion Score',
'spec_name',
'spec_match',
'exp_name',
'pept_with_mod',
'pept_flank',
'ch1',
'ch2',
'norm_ch1',
'norm_ch2',
'Weight']
# ###########################
# ###########################
# 'fetchid',
# # 'best_pept', # THIS IS NEW STUFF ...
# 'enzyme',
# # 'experiment_num', # THIS IS NEW STUFF ...
# ###########################
# # 'prev_aa',
# # 'next_aa',
# 'prev_aa_fetched',
# 'next_aa_fetched',
# # 'pept_start',
# # 'pept_stop',
# 'start_fetched',
# 'stop_fetched',
# ###########################
# ###########################
# 'gsite_AA1',
# 'gsite_AA2',
# 'gsite_AA3',
# 'signal',
# 'signal_loc',
# 'tm_span',
# 'prot_len', # PROTEIN LENGTH TO BE ADDED ...
# 'SCORE',
# 'crit_pept_in']
THE_MOST_FINAL_DF_max_prob[requested_cols].to_csv(os.path.join(common_path,'FINAL_gsite_anthology.csv'),index=False)
# THE_MOST_FINAL_DF_uniq.to_csv(os.path.join(common_path,'FINAL_gsite_anthology.csv'),index=False)
# # DESIREd COLUMNS ...
# # ############################################
# # # columns that needs to be delivered ... #
# # ############################################
# # # A gsites, 1 per line
# # # B pept, 1 per line
# # # B1 enzyme, G or T, derive from 'Biological sample category', like this: {'TrypsinSample1':'T','GluC_Sample2':'G'}
# # # C peptide_start, 1 per line accordingly
# # # D all_uids, REPLACE WITH col:H
# # # E prot_seq, try to get those from NCBI, not from UniProt ...
# # # F protein, ??? sequence, name or what???
# # # G uid_max, UID for major form instead or something like that ...
# # # H prot_name, parsed out human-readable name from 'Protein name'
# # # H1 gene_name, parsed out GN=xxx from 'Protein name'
# # # I uniq_peptide_count, discard that column ...
# # # J pept_probability, output number not the string - this would be the criteria
# # # K gsites_predicted, OK
# # # L gsites_predicted_number, OK
# # # M gsite_start, beware of 0 or 1 type of indexing ...
# # # N,O,P - gsites AAs in separate columns
# # # M1, NOP combined, gsite sequence basically!
# # # Q signal, from GeneBank record on the protein, simply Y,N on whether there is a 'Signal' in gb.
# # # R signal_location, location of the signal from Q
# # # S tm_span, Y,N just for the fact of having TM span as a protein feature.
# # THINGS TO BE DONE:
# # 1) MERGE PEP_INFO WITH SPEC_INFO, SO THAT PEPT-PROT RELATIONSHIP WOULD BE SET UP ...
# # probably getting rid of many, many columns in the spec_info along the way ...
# # 2) MODIFY AND TEST THE 'deamid_to_gsite' FUNCTION (PAY ATTENTION TO 1-BASED AND 0-BASED NUMBERING OF AAs)
# # 3) USE 'deamid_to_gsite' TO FINALLY EXTRACT GSITES ...
# # 4) ANALYSE(?) GSITES: GROUPBY UNIQUE GSITES (GSITE[seq],GSITE_START,PROT_IDENTIFICATOR) TO SEE HOW MANY PEPTS YIELD THE SAME GSITES ...
# # 5) SELECT A SINGLE PEPT PER GSITE??????? USING REAID'S CRITERIA, OR LEAVE ALL GSITE-PEPTS PAIRS ???????????
# #######################################
# # 6) COMPLY WITH THE #columns that needs to be delivered...# FOR THE VERY VERY FINAL OUTPUT ...
# #######################################
| mit |
MaxHalford/xam | tests/test_model_selection.py | 1 | 1398 | import datetime as dt
import unittest
import numpy as np
import pandas as pd
import xam
class TestOrderedCrossValidation(unittest.TestCase):
def test_with_ints(self):
X = pd.DataFrame(index=[1, 2, 3, 4])
n_splits = 2
delta = 1
splits = [
[[0, 1, 2], [3]],
[[0, 1], [2]]
]
cv = xam.model_selection.OrderedCV(n_splits=n_splits, delta=delta)
for i, (train_idx, test_idx) in enumerate(cv.split(X)):
np.testing.assert_array_equal(train_idx, splits[i][0])
np.testing.assert_array_equal(test_idx, splits[i][1])
def test_with_dates(self):
date = dt.datetime
X = pd.DataFrame(index=[
date(2017, 1, 1),
date(2017, 1, 2),
date(2017, 1, 4),
date(2017, 1, 5),
date(2017, 1, 7),
date(2017, 1, 8),
date(2017, 1, 9),
])
n_splits = 3
delta = dt.timedelta(days=2)
splits = [
[[0, 1, 2, 3, 4], [5, 6]],
[[0, 1, 2, 3], [4]],
[[0, 1], [2, 3]],
]
cv = xam.model_selection.OrderedCV(n_splits=n_splits, delta=delta)
for i, (train_idx, test_idx) in enumerate(cv.split(X)):
np.testing.assert_array_equal(train_idx, splits[i][0])
np.testing.assert_array_equal(test_idx, splits[i][1])
| mit |
jcarva/digital_image_processing_assignments | assignment2/python/task2b.py | 1 | 2743 | # coding=UTF-8
# 2. For an image with dimensions 1024x1024, compute the processing time for:
# b. Applying the Mx1 Mean Filter to the image resulting from applying the 1xN Mean Filter;
import matplotlib.pyplot as plt
import filter
import utils
import timer
def main():
# image = utils.plt_load_image("lena_headey_1024.jpg")
image = utils.plt_load_image("lenna.png")
image = (image / float(image.max())) ** (1 / 1.0)
print("Shape:", image.shape)
print("Values min/max:", image.min(), image.max())
rows, columns, channels = utils.image_shape(image)
#1xN
image_out1x3 = timer.count('[Average k=1x3]', filter.average, [image, utils.m_n_average_kernel(1, 3)])
image_out1x25 = timer.count('[Average k=1x25]', filter.average, [image, utils.m_n_average_kernel(1, 25)])
image_out1x53 = timer.count('[Average k=1x53]', filter.average, [image, utils.m_n_average_kernel(1, 53)])
#Mx1
image_out3x1_on_1x3 = timer.count('[Average k=3x1 on k=1x3]', filter.average, [image_out1x3, utils.m_n_average_kernel(3, 1)])
image_out25x1_on_1x3 = timer.count('[Average k=25x1 on k=1x3]', filter.average, [image_out1x3, utils.m_n_average_kernel(25, 1)])
image_out3x1_on_1x25 = timer.count('[Average k=3x1 on k=1x25]', filter.average, [image_out1x25, utils.m_n_average_kernel(3, 1)])
image_out25x1_on_1x25 = timer.count('[Average k=25x1 on k=1x25]', filter.average, [image_out1x25, utils.m_n_average_kernel(25, 1)])
image_out13x1_on_1x53 = timer.count('[Average k=13x1 on k=1x53]', filter.average, [image_out1x53, utils.m_n_average_kernel(13, 1)])
fig = plt.figure(figsize=(6, 4))
sub1 = plt.subplot(2, 3, 1)
sub1.set_title('Original')
sub2 = plt.subplot(2, 3, 2)
sub2.set_title('k=3x1 on k=1x3')
sub3 = plt.subplot(2, 3, 3)
sub3.set_title('k=3x1 on k=1x25')
sub4 = plt.subplot(2, 3, 4)
sub4.set_title('k=25x1 on k=1x3')
sub5 = plt.subplot(2, 3, 5)
sub5.set_title('k=25x1 on k=1x25')
sub6 = plt.subplot(2, 3, 6)
sub6.set_title('k=13x1 on k=1x53')
if channels == 1:
sub1.imshow(image, cmap='gray')
sub2.imshow(image_out3x1_on_1x3, cmap='gray')
sub3.imshow(image_out3x1_on_1x25, cmap='gray')
sub4.imshow(image_out25x1_on_1x3, cmap='gray')
sub5.imshow(image_out25x1_on_1x25, cmap='gray')
sub6.imshow(image_out13x1_on_1x53, cmap='gray')
else:
sub1.imshow(image)
sub2.imshow(image_out3x1_on_1x3)
sub3.imshow(image_out3x1_on_1x25)
sub4.imshow(image_out25x1_on_1x3)
sub5.imshow(image_out25x1_on_1x25)
sub6.imshow(image_out13x1_on_1x53)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
antoinecarme/pyaf | tests/HourOfWeek/test_Business_Hourly_LunchTime.py | 1 | 2087 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
np.random.seed(seed=1960)
#get_ipython().magic('matplotlib inline')
df = pd.DataFrame()
lTimeVar = 'Time'
lSignalVar = 'Signal'
N = 10000
df[lTimeVar + '_Hourly'] = pd.date_range('2000-1-1', periods=N, freq='1h')
df['Hour'] = df[lTimeVar + '_Hourly'].dt.hour
df['Day'] = df[lTimeVar + '_Hourly'].dt.dayofweek
df[lSignalVar] = 5 + np.random.randn(N) + 10 * df['Hour'].apply(lambda x : x if (12 <= x and x < 14) else 23) * df['Day'].apply(lambda x : x if (x < 4) else 12)
print(df.head())
print(df.info())
#df.to_csv("outputs/ozone_WDHMS.csv");
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
for k in [1]:
for timevar in [lTimeVar + '_Hourly']:
lEngine = autof.cForecastEngine()
lEngine
H = 24;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mFilterSeasonals = True;
lEngine.mOptions.mDebugCycles = False;
lEngine.mOptions.set_active_autoregressions([]);
lEngine.train(df , timevar , lSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_" + timevar + "apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[timevar , lSignalVar, lSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
lEngine.standardPlots(name = "outputs/ozone_LunchTime_" + timevar)
| bsd-3-clause |
wesm/arrow | python/pyarrow/tests/test_adhoc_memory_leak.py | 5 | 1410 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import pyarrow as pa
import pyarrow.tests.util as test_util
try:
import pandas as pd
except ImportError:
pass
@pytest.mark.memory_leak
@pytest.mark.pandas
def test_deserialize_pandas_arrow_7956():
df = pd.DataFrame({'a': np.arange(10000),
'b': [test_util.rands(5) for _ in range(10000)]})
def action():
df_bytes = pa.ipc.serialize_pandas(df).to_pybytes()
buf = pa.py_buffer(df_bytes)
pa.ipc.deserialize_pandas(buf)
# Abort at 128MB threshold
test_util.memory_leak_check(action, threshold=1 << 27, iterations=100)
| apache-2.0 |
Averroes/statsmodels | statsmodels/datasets/tests/test_utils.py | 26 | 1697 | import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
if sys.version_info[0] >= 3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
pass
#duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
else:
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
#base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 2 | 12365 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections.abc import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, tosequence
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int, the DictVectorizer can be
followed by :class:`sklearn.preprocessing.OneHotEncoder` to complete
binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be
sorted when fitting. True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[2., 0., 1.],
[0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OrdinalEncoder : handles nominal/categorical
features encoded as columns of arbitrary data types.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in x.items():
if isinstance(v, str):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = {f: i for i, f in enumerate(feature_names)}
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in x.items():
if isinstance(v, str):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in x.items():
if isinstance(v, str):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(new_vocab.items(),
key=itemgetter(1))]
return self
def _more_tags(self):
return {'X_types': ["dict"]}
| bsd-3-clause |
lazywei/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average Precision-Recall curve and average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jszopi/repESP | repESP_old/graphs.py | 1 | 23402 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
import os
import numpy as np
from numpy.linalg import norm as vec_norm
import random
import math
import re
# This was necessary to prevent y-axis label from being cut off when plotting
# http://stackoverflow.com/a/17390833
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import field_comparison
DIR_LABELS = ['x', 'y', 'z']
def _plot_common(dimension, title, guideline=False):
"""Set up plot of correct dimensionality and return related objects"""
fig = plt.figure()
if dimension == 3:
ax = fig.add_subplot(111, projection='3d')
elif dimension == 2:
ax = fig.add_subplot(111)
else:
raise NotImplementedError("Plotting of dimension {0} not implemented"
.format(dimension))
# Add a horizontal line at 0 for 2D plots
if guideline and dimension == 2:
ax.axhline(color='k', linestyle='--')
if title is not None:
plt.title(title)
return fig, ax
def plot(*fields, color=None, color_span=None, dist_field_filter=None,
exclusion_dist=0, rand_skim=0.01, extra_filter=None, save_to=None,
axes_limits=None, title=None, get_limits=None):
assert 2 <= len(fields) <= 3
# Pack fields and color together
if color is not None:
fields_and_color = list(fields) + [color]
else:
fields_and_color = fields
field_comparison._check_grids(*fields_and_color)
field_comparison._check_fields_for_nans(*fields_and_color)
# Necessary, as the original Field will be overwritten when filtering
dist_field_filter_type = dist_field_filter.field_type
fig, ax = _plot_common(len(fields), title, guideline=True)
_set_axis_labels(ax, *fields)
# This function got really fat due to all that filtering and it can still
# handle only one additional filter. Some refactoring is due. TODO
if extra_filter is not None:
fields_and_color = extra_filter(*fields_and_color)
# This filtering step changes all Fields to np.arrays. As a result, in
# the next filtering step, by dist_field_filter, a mixture of np.arrays
# and Fields is passed, which is not handled by the filters. While that
# deficiency was intentional, I don't think there's a reason it should
# not be handled (TODO). But for now, a kludge:
if dist_field_filter is not None:
dist_field_filter = dist_field_filter.values
if dist_field_filter is not None:
if dist_field_filter_type != 'dist':
print("WARNING: The field selected for filtering is not of type "
"'dist' but ", dist_field_filter.field_type)
dist_field_filter, *fields_and_color = field_comparison.filter_by_dist(
exclusion_dist, *([dist_field_filter] + fields_and_color))
elif exclusion_dist:
print("WARNING: exclusion distance specified but no Field passed to "
"filter by.")
fields_and_color = field_comparison.skim(rand_skim, *fields_and_color)
fields_and_color = list(map(field_comparison._flatten_no_nans,
fields_and_color))
if color is not None:
cmap = _get_cmap(len(fields), color.field_type)
cmap_name = color.lookup_name()
*fields, color = fields_and_color
# ax.scatter has to be inside of the 'color is not None' conditional
# because an error occurs when the kwarg ``c`` is explicitly set to
# None, even though it's the default value.
vmin, vmax = color_span if color_span is not None else None, None
image = ax.scatter(*fields, c=color, cmap=cmap, vmin=vmin, vmax=vmax,
lw=0, s=5)
cbar = fig.colorbar(image, label=cmap_name)
else:
fields = fields_and_color
ax.scatter(*fields, lw=0, s=5)
_set_axes_limits(len(fields), ax, axes_limits)
_save_or_display(save_to)
# Save limits to get_limits. This is useful when they are to be reused in
# other plots. Saving the limits to an argument was more intuitive than
# returning them.
if get_limits is not None:
# Code copied from _set_axes_limits (TODO: DRY)
limits = []
for dir_label in DIR_LABELS[:len(fields)]:
# Get current limits
limits.append(getattr(ax, "get_" + dir_label + "lim")())
get_limits[:] = limits
def _set_axes_limits(dimension, ax, axes_limits):
"""Set axes limits"""
if axes_limits is None:
return
# Smaller lengths are allowed, will be interpreted as the first few axes.
# This should be an Exception not assertion though.
assert len(axes_limits) <= dimension
for axis_limits, dir_label in zip(axes_limits, DIR_LABELS):
# Get current limits
limits = list(getattr(ax, "get_" + dir_label + "lim")())
for i, axis_limit in enumerate(axis_limits):
if axis_limit is not None:
limits[i] = axis_limit
getattr(ax, "set_" + dir_label + "lim")(limits)
# Although **not for my purposes at the moment** (I only want to set limits
# so that different plots can be easily compared, so both axes will be
# getting set), it would be nice to rescale the axes which were not
# modified. However, when autoscaling, matplotlib always uses all the data.
# ax.relim() with ax.autoscale_view() seemed to be relevant but they do not
# easily operate on datapoints I think.
def _set_axis_labels(ax, *fields):
"""Set axis labels based on free-form names of Fields being plotted"""
for field, dir_label in zip(fields, DIR_LABELS):
getattr(ax, "set_" + dir_label + "label")(field.lookup_name())
def _get_cmap(dimension, field_type):
"""Return a color map based on plot dimensionality and field type"""
if field_type == 'dist':
if dimension != 3:
# Shading by distance is more intuitive
return plt.get_cmap('Blues_r')
else:
print("WARNING: Shading by distance doesn't look good on a 3D "
"plot. Colouring instead.")
return plt.get_cmap('coolwarm_r')
def _save_or_display(save_to=None):
"""Save the plot or display it if save_to is None"""
if save_to is None:
plt.show()
else:
if type(save_to) is PdfPages:
# Need to check the type first, because it may be a file object if
# a pdf is to be created, see:
# http://matplotlib.org/faq/howto_faq.html#save-multiple-plots-to-one-pdf-file
plt.savefig(save_to, format="pdf")
elif os.path.isfile(save_to):
raise FileExistsError("File exists: " + save_to)
else:
# DPI may need to be increased
plt.savefig(save_to)
plt.close()
def plot_points(points_field, dimension, title=None, color_span=None,
axes_limits=None, save_to=None, rand_skim=1, plane_eqn=None,
dist_thresh=None, molecule=None, atom_dist_threshs=None,
atom_format=None, show_all_atoms=False):
"""Plot fitting or cube points in 2 or 3D coloured by values
Parameters
----------
points_field : Field
        The ``Field`` object containing the points to be plotted.
dimension : {2, 3}
Dimensions of the plot.
title : str, optional
Plot title.
color_span : [float, float], optional
The lower and upper limits for the color range for field values at
fitting points. If this option is not specified, the limits will be
calculated automatically based on all data points, not only the plotted
slice of points.
axes_limits : [float, float], optional
A pair of values for the axes limits in angstroms. The same limits will
be applied to all axes, non-square/cubic plots are currently not
supported.
save_to : str, optional
The file to which the graph is to be saved. If not specified, the graph
will be displayed in interactive mode.
rand_skim : float, optional
For plots with a large number of points, it may be necessary to plot
only a fraction of the points. The points to be plotted are selected
randomly and this option specifies the probability for a given point to
be plotted. Values in the range (0, 1] are allowed, 1 is the default
(all points plotted).
plane_eqn : List[float], optional
The equation for the slicing plane specified with a list of parameters
of the following plane equation: Ax + By + Cz + D = 0. The default is
``None``.
dist_thresh : float, optional
The distance in angstrom from the slicing plane within which points are
to be plotted. If all points are to be plotted, specify a very high
number. The default is ``None``.
molecule : Molecule, optional
The molecule to be plotted. The default is ``None``.
atom_dist_threshs : List[float], optional
The thresholds for atom distance from slicing plane, which will be used
to choose the formatting of atom labels as specified in
``atom_format``. The default is ``None`` and results in the thresholds
[0, 0.5, 1] i.e. four ranges: equal zero, between 0 and 0.5, between
        0.5 and 1, and above 1.
atom_format : List[dict], optional
The formatting for the atom labels for each of the distance ranges
specified with the ``atom_dist_thresh`` option. The default is ``None``
and results in:
.. code:: python
[{
'color': 'red',
'bbox': dict(
facecolor='none',
edgecolor='red'
)
}, {
'color': 'red',
'bbox': dict(
facecolor='none',
edgecolor='red',
linestyle='dashed'
)
}, {
'color': 'grey',
}, {
'color': 'grey',
}]
show_all_atoms : bool, optional
If the ``atom_format`` option specifies a formatting option for the
last, open range specified by ``atom_dist_threshs``, this option
decides whether atoms in that range are to be plotted. The default is
``False``.
"""
project_onto_plane = _check_args(dimension, plane_eqn, dist_thresh)
field_comparison._check_fields_for_nans(points_field)
fig, ax = _plot_common(dimension, title)
# Skimming, filtering and projecting
points, values = _points_dist_filter(
points_field.get_points(), points_field.get_values(), plane_eqn,
dist_thresh)
points, values = _points_rand_skim(points, values, rand_skim)
points = _project_points(points, project_onto_plane, dimension, plane_eqn)
_plot_atoms(molecule, ax, dimension, plane_eqn, project_onto_plane,
atom_dist_threshs, atom_format, show_all_atoms)
cmap_name = points_field.lookup_name()
cmap = plt.get_cmap('RdYlBu')
vmin, vmax = color_span if color_span is not None else None, None
image = ax.scatter(*list(zip(*points))[:dimension], c=values,
cmap=cmap, vmin=vmin, vmax=vmax, s=50, lw=0.5)
cbar = fig.colorbar(image, label=cmap_name)
_set_axis_labels2(ax, dimension, project_onto_plane, plane_eqn)
_set_axes_limits(dimension, ax, axes_limits)
if dimension == 2:
plt.axes().set_aspect('equal')
_save_or_display(save_to)
def _check_args(dimension, plane_eqn, dist_thresh):
"""Checks arguments and decides whether to project points"""
if dimension == 3:
project_onto_plane = False
elif dimension == 2:
if plane_eqn is None:
project_onto_plane = False
else:
project_onto_plane = True
else:
raise ValueError("Parameter `dimension` needs to be either 2 or 3 but "
"{0} was given.".format(dimension))
if dist_thresh is not None and plane_eqn is None:
raise ValueError("`dist_thresh` was specified but no `plane_eqn` was "
"given.")
if dist_thresh is None and dimension == 2:
print("WARNING: A 2D plot will look cluttered without cut-off value "
"for the distance from the specified plane (`dist_thresh`).")
return project_onto_plane
def _set_axis_labels2(ax, dimension, project_onto_plane, plane_eqn):
if project_onto_plane:
ax.set_xlabel(r'Coordinates mapped onto plane ${0:.2f}x {1:+.2f}y '
'{2:+.2f}z {3:+.2f} = 0$'.format(*plane_eqn))
else:
# Zip with dimension to stop early if it's less than 3 dimensions
for dir_label, dim in zip(DIR_LABELS, range(dimension)):
getattr(ax, "set_" + dir_label + "label")(dir_label)
def _plot_atoms(molecule, ax, dimension, plane_eqn, project_onto_plane,
atom_dist_threshs, atom_format, show_all_atoms):
# When writing docstrings, have a look at plot_points, where some of these
# options are already documented.
if molecule is None:
return
# Default values for formatting
if atom_format is None:
atom_format = [
{
'color': 'red',
'bbox': dict(
facecolor='none',
edgecolor='red'
)
}, {
'color': 'red',
'bbox': dict(
facecolor='none',
edgecolor='red',
linestyle='dashed'
)
}, {
'color': 'grey',
}, {
'color': 'grey',
}]
if atom_dist_threshs is None:
atom_dist_threshs = [0, 0.5, 1]
# This is outside of the loop to take advantage of projecting all atoms at
# once with _project_points
coords = [atom.coords for atom in molecule]
coords = _project_points(coords, project_onto_plane, dimension, plane_eqn)
for atom, coord in zip(molecule, coords):
assert 0 <= len(atom_format) - len(atom_dist_threshs) <= 1
atom_string = '{0}{1}'.format(atom.identity, atom.label)
# Avoid retyping _plot_atom arguments by creating a lambda
plot_atom = lambda curr_format, marker_fill: _plot_atom(
ax, coord, atom_string, dimension, curr_format,
marker_fill=marker_fill)
if plane_eqn is None:
plot_atom({'color': 'red'}, 'k')
else:
            # This big for-else loop checks into which threshold range the
            # atom's distance falls
for curr_thresh, curr_format in zip(atom_dist_threshs,
atom_format):
dist = _plane_point_dist(plane_eqn, atom.coords)
if _check_dist(dist, curr_thresh):
plot_atom(curr_format, 'k')
break
else:
# If it doesn't fit into any threshold, check if such atoms
# should be plotted and if their plotting arguments have been
# supplied as the additional, hanging element of `atom_format`
if (len(atom_format) == len(atom_dist_threshs) + 1 and
show_all_atoms):
plot_atom(atom_format[-1], 'grey')
def _plot_atom(ax, coords, atom_string, dimension, curr_format, marker='D',
marker_fill='b'):
"""Plot atom as text and optionally marker"""
ax.text(*coords[:dimension], atom_string, **curr_format)
if marker is not None:
ax.scatter(*coords[:dimension], marker='D', c=marker_fill)
def _plane_point_dist(equation, point):
"""Calculate the distance between a point and a plane given by equation
Parameters
----------
equation : List[float]
A list of coefficients of the equation describing the plane :math:`Ax +
By + Cz + D = 0`. The length should hence be 4. For example, the
plane :math:`z = 0` corresponds to the argument ``[0, 0, 1, 0]``.
point : List[float]
The coordinates of the point ``[x, y, z]``. A list of length 3.
Returns
-------
float
The calculated distance according to the equation:
.. math::
            d = \\frac{A x + B y + C z + D}{\\sqrt{A^2 + B^2 + C^2}}
Returning the signed value of this expression allows to distinguish
between points lying on the opposite sides of the plane.
"""
normal = np.array(equation[:3])
point = np.array(point)
return (np.dot(normal, point) + equation[3])/vec_norm(normal)
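def _example_plane_point_dist():
    # Illustrative sketch (hypothetical helper): the plane z = 0 is encoded as
    # [0, 0, 1, 0]; the distance is signed, so points on opposite sides of the
    # plane give values of opposite sign.
    above = _plane_point_dist([0, 0, 1, 0], [3.0, -4.0, 2.5])
    below = _plane_point_dist([0, 0, 1, 0], [0.0, 0.0, -1.0])
    return math.isclose(above, 2.5) and math.isclose(below, -1.0)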
def _plane_through_points(point1, point2, point3):
    """Return coefficients (a, b, c, d) of the plane ax + by + cz + d = 0
    passing through the three given points"""
point1 = np.array(point1)
point2 = np.array(point2)
point3 = np.array(point3)
u = point2 - point1
v = point3 - point1
cross = np.cross(u, v)
if not np.count_nonzero(cross):
raise ValueError("The supplied points appear to be colinear.")
a, b, c = cross[:3]
d = - (a*point1[0] + b*point1[1] + c*point1[2])
return a, b, c, d
def plane_through_atoms(molecule, label1, label2, label3):
    """Return the equation of the plane through the three atoms with the
    given labels"""
    points = [molecule[label - 1].coords for label in [label1, label2, label3]]
    return _plane_through_points(*points)
def _project_point_onto_plane(equation, point):
"""Calculate coordinates of a point perpendicularly projected onto plane
Parameters
----------
equation : List[float]
A list of coefficients of the equation describing the plane :math:`Ax +
By + Cz + D = 0`. The length should hence be 4. For example, the
plane :math:`z = 0` corresponds to the argument ``[0, 0, 1, 0]``.
point : List[float]
The coordinates of the point ``[x, y, z]``. A list of length 3.
Returns
-------
np.ndarray[float]
The coordinates of the given point projected perpendicularly to the
given plane. Calculated according to equation:
.. math::
            \\vec{OA'} = \\vec{OA} - d \\frac{\\mathbf{n}}{\\|\\mathbf{n}\\|},
        where :math:`\\mathbf{n}` is the vector normal to the plane.
"""
normal = np.array(equation[:3])
point = np.array(point)
return point - _plane_point_dist(equation, point)*normal/vec_norm(normal)
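def _example_project_point_onto_plane():
    # Illustrative sketch (hypothetical helper): projecting onto the plane
    # z = 0 zeroes the z-coordinate and leaves x and y unchanged.
    projected = _project_point_onto_plane([0, 0, 1, 0], [1.0, 2.0, 3.0])
    return np.allclose(projected, [1.0, 2.0, 0.0])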
def _get_alt_coords(plane_eqn):
"""Create new coordinate system with z-axis orthogonal to given plane"""
# Normal to the plane
normal = np.array(plane_eqn[:3])
# Normalize (set magnitude to 1)
normal = normal/np.linalg.norm(normal)
# Set suggested direction of i, here it is the old x-direction.
i = np.array([1, 0, 0])
    # Check if the normal is parallel or antiparallel to the x-direction. If
    # so, the i vector needs to be initially pointed in a different direction,
    # e.g. that of y.
    if abs(np.dot(i, normal)) == 1:
        i = np.array([0, 1, 0])
# Select direction as close to the suggested one but orthogonal to the
# normal vector. This is done by taking the *rejected* vector when
# projecting i onto normal (the subtrahend).
i_prime = i - np.dot(i, normal)*normal
# Normalize
i_prime = i_prime/np.linalg.norm(i_prime)
# Find vector orthogonal to both i and the normal vector by taking their
# cross product. The order there is significant and was chosen to obtain a
# right-handed coordinate system (i, j, normal), just like (x, y, z)
j_prime = np.cross(normal, i_prime)
# No need to normalize
return i_prime, j_prime, normal
def _new_coord_matrix(new_coord_system):
"""Calculate matrix of transformation from old to new coordinate system"""
# This is an implementation of the formula after 6 on page 10 of:
# http://ocw.mit.edu/courses/aeronautics-and-astronautics/16-07-dynamics-fall-2009/lecture-notes/MIT16_07F09_Lec03.pdf
# The code below just calculates the elements of the matrix
old = np.identity(3)
# The templates are used to create the elements of the desired 3x3 matrix.
# They are populated in a meshgrid fashion, but I couldn't get it to work
# due to the nesting, so I settled on a list comprehension kludge.
# To simplify the code, the 9 elements of the matrix are kept as a
# contiguous array of 9 vectors, hence the reshaping.
old_template = np.array([old]*3).reshape(9, 3)
new_template = np.array([[elem]*3 for elem in new_coord_system])
new_template = new_template.reshape(9, 3)
# The desired matrix is calculated as an element-wise dot product
matrix = np.array([np.dot(old_elem, new_elem) for old_elem, new_elem in
zip(old_template, new_template)])
return matrix.reshape(3, 3)
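def _example_new_coord_matrix_identity():
    # Illustrative sketch (hypothetical helper): for the plane z = 0 the new
    # basis coincides with (x, y, z), so the transformation matrix reduces to
    # the identity.
    matrix = _new_coord_matrix(_get_alt_coords([0, 0, 1, 0]))
    return np.allclose(matrix, np.identity(3))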
def _project_points(points, project_onto_plane, dimension, plane_eqn):
"""Project points onto the given plane (3D) or its new coordinate system"""
if project_onto_plane:
if dimension == 3:
# Simple perpendicular projection onto 3D plane (this is expected
# to be rarely used and is not accessible through the 'public'
# `plot_points` as it switches projection off in 3D
points = [_project_point_onto_plane(plane_eqn, point) for point in
points]
elif dimension == 2:
# This is actually more than a projection, as 'looking' at the
# plane in a perpendicular manner requires a change of coordinate
# system. Otherwise the points would then be projected onto the
# (x, y) plane when flattening for plotting.
matrix = _new_coord_matrix(_get_alt_coords(plane_eqn))
points = [np.dot(matrix, point) for point in points]
return points
def _check_dist(dist, thresh):
"""Check if a distance is below the given threshold value"""
# The second condition ensures that floats are rounded correctly. With some
# of the grids some points may lie on the threshold value but would not be
# caught by the first condition due to float precision.
# Absolute tolerance was selected as one decimal place fewer than what
# seems to be the precision of Gaussian .esp coordinates.
return abs(dist) <= thresh or math.isclose(abs(dist), thresh, abs_tol=1e-4)
def _points_dist_filter(points, values, plane_eqn, dist_thresh):
if dist_thresh is None or plane_eqn is None:
return points, values
_points, _values = [], []
for point, value in zip(points, values):
dist = _plane_point_dist(plane_eqn, point)
if _check_dist(dist, dist_thresh):
_points.append(point)
_values.append(value)
return _points, _values
def _points_rand_skim(points, values, rand_skim):
if rand_skim == 1:
return points, values
_points, _values = [], []
for point, value in zip(points, values):
if random.random() <= rand_skim:
_points.append(point)
_values.append(value)
return _points, _values
def pretty_molecule_name(molecule_name):
if molecule_name.endswith("_plus"):
molecule_name = molecule_name[:-5] + "$^\oplus$"
elif molecule_name.endswith("_minus"):
molecule_name = molecule_name[:-6] + "$^\ominus$"
# Make all numbers subscripts
molecule_name = re.sub(r'(\d+)', r'$_{\1}$', molecule_name)
return molecule_name
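# Illustrative example: the charge suffix becomes a circled sign and digits become
# LaTeX subscripts, e.g.
#   pretty_molecule_name("NH3_plus")   # -> 'NH$_{3}$$^\oplus$'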
| gpl-3.0 |
necromuralist/boston_housing | boston_housing/statistical_analysis.py | 1 | 7434 |
# python standard library
import os
import pickle
from distutils.util import strtobool
# third-party
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plot
import numpy
import pandas
import seaborn
from scikits.bootstrap import ci
import statsmodels.api as statsmodels
# this code
from boston_housing.common import load_housing_data, CLIENT_FEATURES
from boston_housing.common import print_image_directive
seaborn.set_style('whitegrid')
seaborn.color_palette('hls')
REDO_FIGURES = strtobool(os.environ.get('REDO_FIGURES', 'off'))
housing_features, housing_prices, feature_names = load_housing_data()
housing_data = pandas.DataFrame(housing_features, columns=feature_names)
housing_data['median_value'] = housing_prices
new_columns = ('crime_rate',
'large_lots',
'industrial',
'charles_river',
'nitric_oxide',
'rooms',
'old_houses',
'distances',
'highway_access',
'property_taxes',
'pupil_teacher_ratio',
'proportion_blacks',
'lower_status')
old_names = ('CRIM',
'ZN',
'INDUS',
'CHAS',
'NOX',
'RM',
'AGE',
'DIS',
'RAD',
'TAX',
'PTRATIO',
'B',
'LSTAT')
re_map_names = dict(zip(new_columns, old_names))
for new_key, old_key in re_map_names.iteritems():
housing_data[new_key] = housing_data[old_key]
client_features = pandas.DataFrame(CLIENT_FEATURES, columns=new_columns)
housing_data.to_hdf('data/housing_data.h5', 'table')
client_features.to_hdf('data/client_features.h5', 'table')
for index, old_name in enumerate(old_names):
print(" {0},{1}".format(old_name, new_columns[index]))
description = housing_data.describe()
for item in description.index:
formatter = " {0},{1:.0f}" if item == 'count' else ' {0},{1:.2f}'
print(formatter.format(item, description.median_value.loc[item]))
q_3 = description.median_value.loc["75%"]
q_1 = description.median_value.loc["25%"]
iqr = (q_3 - q_1)
assert iqr - 7.975 < 0.001
print(" IQR,{0}".format(iqr))
outlier_limit = 1.5 * iqr
low_outlier_limit = q_1 - outlier_limit
high_outlier_limit = q_3 + outlier_limit
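# For reference, with the standard Boston housing prices (Q1 and Q3 of roughly
# 17.0 and 25.0 thousand dollars) the Tukey fences above work out to about
# 17.0 - 1.5*7.975 ~ 5.1 and 25.0 + 1.5*7.975 ~ 37.0; anything outside that band
# is counted as an outlier in the prints below.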
print(" Low Outlier Limit (LOL),{0:.2f}".format(low_outlier_limit))
print(" LOL - min,{0:.2f}".format(low_outlier_limit - housing_data.median_value.min()))
print(" Upper Outlier Limit (UOL),{0:.2f}".format(high_outlier_limit))
print(" max - UOL,{0:.2f}".format(housing_data.median_value.max() - high_outlier_limit))
print(" Low Outlier Count,{0}".format(len(housing_data.median_value[housing_data.median_value < low_outlier_limit])))
print(' High Outlier Count,{0}'.format(len(housing_data.median_value[housing_data.median_value > high_outlier_limit])))
filename = 'median_value_distribution'
figure = plot.figure()
axe = figure.gca()
grid = seaborn.distplot(housing_data.median_value, ax=axe)
axe.axvline(housing_data.median_value.mean(), label='mean')
axe.axvline(housing_data.median_value.median(), label='median',
color='firebrick')
axe.legend()
title = axe.set_title("Boston Housing Median Values")
print_image_directive(filename, figure, scale='95%')
filename = 'median_value_boxplots'
figure = plot.figure()
axe = figure.gca()
grid = seaborn.boxplot(housing_data.median_value, ax=axe)
title = axe.set_title("Boston Housing Median Values")
print_image_directive(filename, figure, scale='95%')
def qqline_s(ax, x, y, dist, fmt='r-', **plot_kwargs):
"""
plot qq-line (taken from statsmodels.graphics.qqplot)
:param:
- `ax`: matplotlib axes
- `x`: theoretical_quantiles
- `y`: sample_quantiles
- `dist`: scipy.stats distribution
- `fmt`: format string for line
- `plot_kwargs`: matplotlib 2Dline keyword arguments
"""
m, b = y.std(), y.mean()
reference_line = m * x + b
ax.plot(x, reference_line, fmt, **plot_kwargs)
return
filename = 'median_value_qqplot'
figure = plot.figure()
axe = figure.gca()
color_map = plot.get_cmap('Blues')
prob_plot = statsmodels.ProbPlot(housing_data.median_value)
prob_plot.qqplot(ax=axe, color='b', alpha=.25)
qqline_s(ax=axe, dist=prob_plot.dist,
x=prob_plot.theoretical_quantiles, y=prob_plot.sample_quantiles,
fmt='-', color=seaborn.xkcd_rgb['medium green'])
#color=(.33, .66, .27))
title = axe.set_title("Boston Housing Median Values (QQ-Plot)")
print_image_directive(filename, figure)
filename = 'median_value_cdf'
figure = plot.figure()
axe = figure.gca()
grid = plot.plot(sorted(housing_data.median_value), numpy.linspace(0, 1, housing_data.median_value.count()))
title = axe.set_title("Boston Housing Median Values (CDF)")
axe.axhline(0.5, color='firebrick')
axe.set_xlabel("Median Home Value in $1,000's")
print_image_directive(filename, figure)
percentile_90 = housing_data.quantile(.90).median_value
def summary_table(variables, title='Variables Summaries',
number_format="{0:.2f}", data=housing_data):
"""
Print a csv-table with variable summaries
:param:
- `variables`: collection of variables to summarize
- `title`: Title for the table
- `number_format`: format string to set decimals
- `data`: source data to summarize
"""
statistics = ('min', '25%', '50%', '75%', 'max', 'mean', 'std')
print(".. csv-table:: {0}".format(title))
print(" :header: Variable, Min, Q1, Median, Q3, Max, Mean, Std\n")
for variable in variables:
description = data[variable].describe()
stats = ','.join([number_format.format(description.loc[stat])
for stat in statistics])
print(" {0},{1}".format(variable, stats))
return
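# Example call (illustrative only; any of the renamed columns created above will work):
#   summary_table(('rooms', 'lower_status', 'nitric_oxide'),
#                 title='Selected Feature Summaries')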
features = re_map_names.keys()
rows = (len(features) // 3)
slice_start = 0
for row in range(1, rows + 1):
filename = 'housing_data_regression_plots_{0}'.format(row)
if REDO_FIGURES:
grid = seaborn.PairGrid(housing_data, x_vars=features[slice_start:row * 3], y_vars=['median_value'])
grid.map(seaborn.regplot)
print_image_directive(filename, grid, print_only=not REDO_FIGURES)
slice_start = row * 3
if rows % 3:
print()
filename = 'housing_data_regression_plots_{0}'.format(row + 1)
if REDO_FIGURES:
grid = seaborn.PairGrid(housing_data, x_vars=features[slice_start:slice_start + rows % 3], y_vars=['median_value'])
grid.map(seaborn.regplot, ci=95)
print_image_directive(filename, grid, print_only=not REDO_FIGURES)
for index, feature in enumerate(new_columns):
print(" {0},{1}".format(feature, CLIENT_FEATURES[0][index]))
chosen_variables = ('lower_status', 'nitric_oxide', 'rooms')
for variable in chosen_variables:
boston_variable = housing_data[variable]
q_1 = boston_variable.quantile(.25)
median = boston_variable.median()
q_3 = boston_variable.quantile(.75)
print(" {v},{c:.2f},{q1:.2f},{m:.2f},{q3:.2f}".format(v=variable,
c=client_features[variable][0],
q1=q_1,
m=median,
q3=q_3)) | mit |
ramansbach/cluster_analysis | examples/sg_tutorial/plot_sg_tutorial.py | 7 | 3828 | """
======================================
A quick tour of sphinx-gallery and rST
======================================
One of the most important components of any package is its documentation.
For packages that involve data analysis, visualization of results / data is
a key element of the docs. Sphinx-gallery is an excellent tool for building
narrative-style documentation in the form of `.py` files, as well as for
generating a gallery of sample images that are generated by your various
scripts.
This is a short demo for how sphinx-gallery can be used to generate beautiful,
HTML-rendered documentation using regular python files.
"""
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# reStructuredText
# ----------------
#
# The primary benefit of sphinx-gallery is that it allows you to interweave
# `reStructuredText <http://docutils.sourceforge.net/rst.html>`_ along with
# your regular python code. This means that you can include formatted text
# with the script, all using regular text files. rST has a particular structure
# it expects in order to render properly (it is what sphinx uses as well).
#
# File headers and naming
# -----------------------
# Sphinx-gallery files must be initialized with a header like the one above.
# It must exist as a part of the triple-quotes docstring at the start of the
# file, and tells SG the title of the page. If you wish, you can include text
# that comes after the header, which will be rendered as a contextual bit of
# information.
#
# In addition, if you want to render a file with sphinx-gallery, it must match
# the file naming structure that the gallery is configured to look for. By
# default, this is `plot_*.py`.
#
# Interweaving code with text
# ---------------------------
#
# Sphinx-gallery allows you to interweave code with your text. For example, if
# we put a few lines of text below...
N = 1000
# They will be rendered as regular code. Note that now I am typing in a
# comment, because we've broken the chain of commented lines above.
x = np.random.randn(N)
# If we want to create another formatted block of text, we need to add a line
# of `#` spanning the whole line below. Like this:
###############################################################################
# Now we can once again have nicely formatted $t_{e}\chi^t$!
# Let's create our y-variable so we can make some plots
y = .2 * x + .4 * np.random.randn(N)
###############################################################################
# Plotting images
# ---------------
#
# Sphinx-gallery captures the images generated by matplotlib. This means that
# we can plot things as normal, and these images will be grouped with the
# text block that they fall underneath. For example, we could plot these two
# variables and the image will be shown below:
fig, ax = plt.subplots()
ax.plot(x, y, 'o')
###############################################################################
# Multiple images
# ---------------
#
# If we want multiple images, this is easy too. Sphinx-gallery will group
# everything together that's within the latest text block.
fig, axs = plt.subplots(1, 2)
axs[0].hist(x, bins=20)
axs[1].hist(y, bins=20)
fig, ax = plt.subplots()
ax.hist2d(x, y, bins=20)
###############################################################################
# Other kinds of formatting
# -------------------------
#
# Remember, rST can do all kinds of other cool stuff. We can even do things
# like add references to other packages and insert images. Check out this
# `guide <http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_ for
# some sample rST code.
#
# .. image:: http://www.sphinx-doc.org/en/stable/_static/sphinxheader.png
# :width: 80%
#
# In the meantime, enjoy sphinx-gallery!
| mit |
zooniverse/aggregation | experimental/algorithms/automatic_optics.py | 2 | 9956 | __author__ = 'greg'
from clustering import Cluster
import pandas as pd
import numpy as np
from scipy.spatial.distance import pdist,squareform
from scipy.cluster.hierarchy import linkage
import abc
import math
class AbstractNode:
def __init__(self):
self.value = None
self.rchild = None
self.lchild = None
self.parent = None
self.depth = None
self.height = None
self.users = None
def __set_parent__(self,node):
assert isinstance(node,InnerNode)
self.parent = node
@abc.abstractmethod
def __traversal__(self):
return []
def __set_depth__(self,depth):
self.depth = depth
class LeafNode(AbstractNode):
def __init__(self,value,index,user=None):
AbstractNode.__init__(self)
self.value = value
self.index = index
self.users = [user,]
self.height = 0
self.pts = [value,]
def __traversal__(self):
return [(self.value,self.index),]
class InnerNode(AbstractNode):
def __init__(self,rchild,lchild,dist=None):
AbstractNode.__init__(self)
assert isinstance(rchild,(LeafNode,InnerNode))
assert isinstance(lchild,(LeafNode,InnerNode))
self.rchild = rchild
self.lchild = lchild
rchild.__set_parent__(self)
lchild.__set_parent__(self)
self.dist = dist
assert (self.lchild.users is None) == (self.rchild.users is None)
if self.lchild.users is not None:
self.users = self.lchild.users[:]
self.users.extend(self.rchild.users)
self.pts = self.lchild.pts[:]
self.pts.extend(self.rchild.pts[:])
self.height = max(rchild.height,lchild.height)+1
def __traversal__(self):
retval = self.rchild.__traversal__()
retval.extend(self.lchild.__traversal__())
return retval
def set_depth(node,depth=0):
assert isinstance(node,AbstractNode)
node.__set_depth__(depth)
if node.rchild is not None:
set_depth(node.rchild,depth+1)
if node.lchild is not None:
set_depth(node.lchild,depth+1)
def lowest_common_ancestor(node1,node2):
assert isinstance(node1,LeafNode)
assert isinstance(node2,LeafNode)
depth1 = node1.depth
depth2 = node2.depth
# make sure that the first node is the "shallower" node
if depth1 > depth2:
temp = node2
node2 = node1
node1 = temp
depth1 = node1.depth
depth2 = node2.depth
while depth2 > depth1:
node2 = node2.parent
depth2 = node2.depth
while node1 != node2:
node1 = node1.parent
node2 = node2.parent
return node1.height
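# How this is used below: the two leaves are first walked up to the same depth,
# then walked up together until they meet, and the height of that common ancestor
# is returned. For two sibling leaves this is simply the height of their shared
# parent, i.e. 1; the later two points were merged in the dendrogram, the larger
# the value, which is what lets it act as a reachability distance in __inner_fit__.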
def create_clusters(ordering,maxima):
if maxima == []:
return [ordering,]
next_maxima = max(maxima,key=lambda x:x[1])
split = next_maxima[0]
left_split = ordering[:split]
right_split = ordering[split:]
maxima_index = maxima.index(next_maxima)
left_maximia = maxima[:maxima_index]
right_maximia = maxima[maxima_index+1:]
# print right_maximia
# need to adjust the indices for the right hand values
right_maximia = [(i-split,j) for (i,j) in right_maximia]
# print right_maximia
retval = create_clusters(left_split,left_maximia)
retval.extend(create_clusters(right_split,right_maximia))
return retval
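# Illustrative example (values invented for clarity): with an ordering of six points
# and a single important maximum at index 3,
#   create_clusters(ordering, [(3, 5.0)])   # -> [ordering[:3], ordering[3:]]
# i.e. the ordering is cut at the largest maximum and each half is then split
# recursively on its own remaining maxima.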
class AutomaticOptics(Cluster):
def __init__(self, project_api,min_cluster_size=1):
Cluster.__init__(self, project_api,min_cluster_size)
# def __correct__(self,subject_id):
# """
# find any nearest neighbour tuples of clusters which have no users in common and merge them
# :return:
# """
# results = self.clusterResults[subject_id]
# i = 0
# # the length of results[2] may and probably will change as we correct things
# # so don't use a for loop
# # -1 so we always have at least one more element to compare against
#
# while i < len(results[2])-1:
# users_i = results[2][i]
# pts_i = results[1][i]
# cluster_i = results[0][i]
#
# closest_distance = float("inf")
# closest_neighbour = None
# overlap = None
# # check the overlap between i and all clusters "above" it - overlap is symmetrical so we don't need
# # to check both ways. Also we are going backwards so that we can pop stuff from the list without
# # messing the indexing up
# for j in range(len(results[2])-1,i,-1):
# assert j != i
# users_j = results[2][j]
# cluster_j = results[0][j]
# dist = math.sqrt(sum([(pi-pj)**2 for (pi,pj) in zip(cluster_i,cluster_j)]))
#
# if dist < closest_distance:
# closest_distance = dist
# overlap = [u for u in users_j if u in users_i]
# closest_neighbour = j
#
# if len(overlap) == 0:
# # remove the j'th element and merge it with the i'th one
# center = results[0].pop(closest_neighbour)
# pts = results[1].pop(closest_neighbour)
# users = results[2].pop(closest_neighbour)
#
# # to allow for generalizations where the overlap is non-empty, we need a way to merge points
# for users in overlap:
# # todo: do generalization
# pass
#
# # todo: find a better way to do this, probably stop it from being a tuple in the first place
# results[1][i] = list(results[1][i])
# results[1][i].extend(pts)
# results[2][i].extend(users)
#
# # calculate the new center
# results[0][i] = [np.mean(axis) for axis in zip(*results[1][i])]
# # move on to the next element
# i += 1
#
# print "ending length is " + str(len(results[2]))
# def __fit__(self,markings,user_ids,jpeg_file=None,debug=False):
def __inner_fit__(self,markings,user_ids,tools,reduced_markings):
# print len(user_ids)
# print len(markings)
# l = [[(u,m) for m in marking] for u,marking in zip(user_ids,markings)]
# user_list,pts_list = zip(*[item for sublist in l for item in sublist])
# assert len(pts_list) == len(list(set(pts_list)))
labels = range(len(markings))
variables = ["X","Y"]
# X = np.random.random_sample([5,3])*10
df = pd.DataFrame(list(markings),columns=variables, index=labels)
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')), columns=labels, index=labels)
row_clusters = linkage(row_dist, method='single')
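        # Each row of the linkage matrix describes one merge as
        # [index of cluster 1, index of cluster 2, merge distance, size of new cluster];
        # the loop below replays those merges to build an explicit tree of
        # LeafNode/InnerNode objects.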
nodes = [LeafNode(pt,ii) for ii,pt in enumerate(markings)]
for merge in row_clusters:
rchild_index = int(merge[0])
lchild_index = int(merge[1])
dist = float(merge[2])
rnode = nodes[rchild_index]
lnode = nodes[lchild_index]
# if both nodes are leaf nodes, just merge them
if isinstance(rnode,LeafNode) and isinstance(lnode,LeafNode):
nodes.append(InnerNode(rnode,lnode,dist))
# if rnode is an inner node - we might need to merge into it
elif isinstance(lnode,LeafNode):
r_dist = rnode.dist
if r_dist == dist:
# merge
pass
else:
# create a new parent node
nodes.append(InnerNode(rnode,lnode,dist))
elif isinstance(rnode,LeafNode):
l_dist = lnode.dist
if l_dist == dist:
# merge
pass
else:
# create a new parent node
nodes.append(InnerNode(rnode,lnode,dist))
else:
# we have two inner nodes
l_dist = lnode.dist
r_dist = rnode.dist
if dist == l_dist:
assert dist == r_dist
assert False
else:
nodes.append(InnerNode(rnode,lnode,dist))
# set the depths of all of the nodes
set_depth(nodes[-1])
reachability_ordering = nodes[-1].__traversal__()
reachability_distance = [float("inf"),]
for ii, leaf in enumerate(reachability_ordering[1:]):
# print reachability_ordering[ii]
# print reachability_ordering[ii+1]
node1 = nodes[reachability_ordering[ii][1]]
node2 = nodes[reachability_ordering[ii+1][1]]
reachability_distance.append(lowest_common_ancestor(node1,node2))
# find the "important" local maxima
important_local_maxima = []
for i in range(1,len(reachability_distance)-1):
dist = reachability_distance[i]
other_distances = []
if i > 0:
other_distances.append(reachability_distance[i-1])
if i < (len(reachability_distance)-1):
other_distances.append(reachability_distance[i+1])
if dist > max(other_distances):
if np.mean(other_distances) < 0.75*dist:
important_local_maxima.append((i,dist))
clusters = create_clusters(zip(*reachability_ordering)[0],important_local_maxima)
users_per_cluster = [[user_ids[markings.index(p)] for p in c] for c in clusters]
cluster_centers = [[np.mean(axis) for axis in zip(*c)] for c in clusters]
results = []
for centers,pts,users in zip(cluster_centers,clusters,users_per_cluster):
results.append({"users":users,"cluster members":pts,"tools":[],"num users":len(users),"center":centers})
return results,0 | apache-2.0 |
nvkhedkar/Python1 | NiftyPE/src/niftype.py | 2 | 4491 | import datetime as dt
import pandas as pd
import numpy as np
import sys
import os
from bokeh.plotting import *
def GetData():
df = pd.read_csv("../data/nifty_pe_mov_avg.csv")
print "done"
print df
ndf = df.loc[:,['Date','PE']]
ndf.to_csv("../data/pe.csv")
class NiftyPE:
def __init__(self):
self.pe_data = pd.DataFrame()
self.mov_avg = pd.DataFrame()
self.curr_pe = 0
self.p = figure(plot_width=1200, plot_height=400, x_axis_type="datetime")
def setBokLineY(self,x,y,w=2,col="navy"):
self.p.line(x,y,line_width=w,line_alpha=0.7,line_color=col)
def getData(self,csv):
self.pe_data = pd.read_csv(csv,index_col=False)
self.pe_data['Date'] = pd.to_datetime(self.pe_data['Date'])
self.curr_pe = self.pe_data.loc[len(self.pe_data.index)-1,'PE']
self.pe_data.sort_values('Date',ascending=False,inplace=True)
self.setBokLineY(self.pe_data['Date'],self.pe_data['PE'],1.5,rgbToHex(25,65,175))
# print self.pe_data
def movAvg(self,days,col):
avgs = []
nn = len(self.pe_data.index)
pecol = self.pe_data.columns.get_loc('PE')
print pecol
for ii in range(0,nn):
if ii < nn - days:
df = self.pe_data.iloc[ii:ii+days,pecol]
avgs.append(np.average(df))
else:
avgs.append(0)
self.mov_avg[str(days)] = avgs
self.setBokLineY(self.pe_data['Date'],\
self.mov_avg.loc[0:nn-days-1,[str(days)]],\
1,col)
print self.mov_avg
def getBokehHist(self,titl,data,bin,w,h):
hist, edges = np.histogram(data, density=False, bins=bin)
chist = []
chist.append(0)
for ii in range(1,len(hist)):
chist.append(hist[ii]+ chist[ii-1])
p = figure(plot_width=w, plot_height=h,title=titl, background_fill="#E8DDCB",tools="save")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],\
fill_color="#036564", line_color="#033649", alpha=0.6)
# p.line(edges,chist,line_width=2,line_alpha=0.7,line_color="#033649")
print edges
print hist
# print titl
# print len(data),np.mean(data)
# print hist
return p
def getBokehCumHist(self,titl,data,bin,w,h,curPE):
hist, edges = np.histogram(data, density=False, bins=bin)
chist = []
for ii in range(1,len(hist)):
hist[ii] = hist[ii]+hist[ii-1]
for ii in range(len(hist)):
chist.append(100*(float(hist[ii])/float(hist[len(hist)-1])))
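		# chist now holds the cumulative distribution in percent; next, find the first
		# bin edge at or above the current P/E so that point can be highlighted on the plot.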
cloc = 0
for ii in range(len(edges)):
if curPE <= edges[ii]:
cloc = ii
break
p = figure(plot_width=w, plot_height=h,title=titl, background_fill="#E8DDCB",tools="save")
p.quad(top=chist, bottom=0, left=edges[:-1], right=edges[1:],\
fill_color="#036564", line_color="#033649", alpha=0.6)
p.circle(edges[cloc],chist[cloc-1],size=15, color="orange", alpha=0.8)
# print titl
# print len(data),np.mean(data)
# print hist
return p
def setPETable(self,fdata,days,div):
data = fdata.iloc[0:days,]
avg = np.mean(data)
stdev = np.std(data)
pmin = np.min(data)
pmax = np.max(data)
updivs = int((pmax - avg)/(div*stdev))
downdivs = int((avg - pmin)/(div*stdev))
upList=[avg+j*stdev for j in range(updivs+1)]
downList=[avg-i*stdev for i in range(downdivs,0,-1)]
rangeList = downList+upList
print "UPLIST",pmax,updivs,avg,stdev
print upList
print "DOWNLIST",pmin,downdivs,avg
print rangeList
hist, edges = np.histogram(data, density=False, bins=rangeList)
print "Freqs"
print hist
print edges
print data
def bplot(self):
output_file("niftype.html", title="Nifty P/E")
expr = self.pe_data['PE']
h0 = self.getBokehHist("Nifty P/E",expr,40,600,400)
h1 = self.getBokehCumHist("Nifty P/E: Cuml Freq",expr,30,600,400,self.curr_pe)
print self.curr_pe
hh = hplot(h0,h1)
f = vplot(self.p,hh)
show(f)
def qcutEx():
df = pd.DataFrame(
{'A' : range(1,14),
'B' : range(11,24) })
print df.loc[:,['A']]
df['A Bucs'] = pd.qcut(df['A'],5,[1,2,3,4,5])
print df
return
df['A Bucs'] = bs
print df
return
def rgbToHex(r,g,b):
return '#%02x%02x%02x' % (r, g, b)
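# Example (for reference): rgbToHex(30, 30, 250) -> '#1e1efa', the blue used for
# the 1500-day moving average in main() below.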
def main(args):
# qcutEx()
# return
npe = NiftyPE()
npe.getData("../data/nifty_pe.csv")
# npe.movAvg(200,rgbToHex(100,100,250))
# npe.movAvg(400,rgbToHex(70,250,70))
npe.movAvg(1500,rgbToHex(30,30,250))
npe.movAvg(750,rgbToHex(30,250,30))
npe.movAvg(250,rgbToHex(250,30,30))
npe.setPETable(npe.pe_data['PE'],7,0.5)
npe.bplot()
return
if __name__ == "__main__":
main(sys.argv)
os._exit(0)
| mit |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/stats/kde.py | 25 | 17715 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / norm_const / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
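    # The two bandwidth factors below implement the rules of thumb described in the
    # class Notes: Scott's rule n**(-1/(d+4)) and Silverman's rule
    # (n*(d+2)/4)**(-1/(d+4)). Whichever one is selected gets squared and multiplied
    # into the data covariance in _compute_covariance().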
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| gpl-3.0 |
crichardson17/starburst_atlas | JWST_Comparison/Supersolar_dusty_cut/Baseline_plotter2super.py | 1 | 12651 | ############################################################
############# Plotting File for Contour Plots ##############
################## Data read from Cloudy ###################
################ Helen Meskhidze, Fall 2015 ################
#################### Elon University #######################
#------------------------------------------------------------------------------------------------------
'''
The inputs this code takes are .grd and .txt files from Cloudy.
It can take in as many input files as needed (in case you have a grid and haven't concatenated all the files) - just change the numFiles value.
This code outputs a set of contour plots, saved to the working directory
'''
#------------------------------------------------------------------------------------------------------
#Packages importing
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
import time
# ------------------------------------------------------------------------------------------------------
# keep track of how long the code takes to run
t0 = time.clock()
headerloc = "/Users/helen/Documents/Elon/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 8 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("grid{:d}.grd".format(i+1)):
gridFiles[i] = file
print file
if file.endswith("grid{:d}.txt".format(i+1)):
emissionFiles[i] = file
print file
print ("Files names constructed")
# ------------------------------------------------------------------------------------------------------
#Patches data
#this section adds the rectangles on the plots of the three other studies
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.)] # ignored
codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY]
path = Path(verts, codes)
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.)] # ignored
path = Path(verts, codes)
path2 = Path(verts2, codes)
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.)] # ignored
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the patches routine: to add patches for others peoples' data onto our plots.
#Adds patches to the first subplot
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='blue', lw=0)
patch = patches.PathPatch(path, facecolor='grey', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
plt.figure(figsize=(12,10))
def add_sub_plot(sub_num, elinesplot):
numplots = 16
plt.subplot(numplots/4.,4,sub_num) #row, column
#choose which z array, then which subplot
z_subnum = z_total[elinesplot]
z_line = z_subnum[:,:,sub_num-1]
contour1 = plt.contour(x_axis, y_axis, z_line, levels, colors='k', origin='lower', extent=extent) #teal contours, dashed
contourmap = plt.imshow(z_line, cmap='Reds', extent= extent, aspect = "auto",origin='lower', vmin=0, vmax =4)
plt.scatter(max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[elinesplot][sub_num-1]], xy=(4,9.5), xytext=(4,9.5), fontsize = 10)
plt.annotate("Dusty 5 Z$\odot$", xy=(3.8,8.5), xytext=(3.8,8.5), fontsize = 10)
plt.annotate(max_values[line[elinesplot][sub_num-1],0], xy = (max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3]),
xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10, color='k')
if sub_num == 4:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.5,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 8:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 12:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 0:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
#if sub_num == (4,8,12,16):
#axColor = plt.axes([7,7.5,0,0.5])
#axis limits
yt_min = 8 ; yt_max = 17; xt_min = 0; xt_max = 6
plt.ylim(yt_min,yt_max); plt.xlim(xt_min,xt_max)
#ticks
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
#axes labels
if sub_num == 0:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 12:
plt.tick_params(labelbottom = 'off')
if sub_num%(numplots/4) == 1:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
else:
plt.tick_params(labelleft = 'off')
if sub_num > 12:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
#else:
# plt.tick_params(labelbottom = 'off')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
#to print progress to the terminal
if sub_num == numplots/2:
print("half the sub-plots of plot{:d} are complete".format(elinesplot+1))
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
# ---------------------------------------------------
#To fix when hdens > 10
#many of my grids were run with hdens values up to 12, so we needed to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 6.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
#there are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
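# max_values holds, per emission line: [peak log ratio, flat index of the peak,
# hdens at the peak, phi at the peak] - filled in below and used to mark the
# peak position with a star on each contour plot.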
#select the scaling factor
#for 4860
incidentnum = 58 #reference index of 4860
incidentline = 4860. #wavelength
incident = Emissionlines[:,58]
print("Scaling data")
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10) > 0:
concatenated_data[i,j] = math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10)
else:
			concatenated_data[i,j] = 0
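# In other words, each entry is log10( 4860 * F_line / F_4860 ), i.e. the line
# strength relative to the 4860 reference line on a log scale, with non-positive
# ratios left at zero.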
print("Finding peaks")
#find the maxima (having cut the arrays already) to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("Data arranged")
# ---------------------------------------------------
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines to plot here! indexes of desired lines
line = [
#UV1Lines
[0, 1, 2, 3, 5, 165, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
#977, 991, 1026, 1216, 1218, 1239, 1240, 1243, 1263, 1304, 1308, 1397, 1402, 1406, 1486, 1531
#UV2line
[16, 17, 18, 19, 20, 21, 23, 24, 25, 27, 29, 30,31, 32, 33, 34],
#1549, 1640, 1665, 1671, 1750, 1860, 1888, 1907, 2297, 2321, 2471, 2326, 2335, 2665, 2798
#Optical Lines
[36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52],
#NE 3 3343A, NE 5 3426, 3646, 3726, 3727, 3729, 3869, 3889, 3933, 4026, 4070, 4074, 4078, 4102, 4340, 4363
#Optical Lines 2
[53, 55, 56, 57, 59, 60, 61, 64, 65, 66, 67, 68, 69, 70, 71, 73],
#NE 4 4720A, AR 4 4740, 4861, O III 4959, O 3 5007, O 1 5577, N 2 5755, HE 1 5876, O 1 6300;
#S 3 6312, O 1 6363, H 1 6563, N 2 6584, S II 6716, S 2 6720, S II 6731
#IR Lines
[75, 76, 77, 78, 79, 80, 81, 82, 84, 83, 85, 86, 87, 88, 89, 90],
#AR 5 7005A, AR 3 7135A, TOTL 7325A, AR 3 7751, 6LEV 8446, CA2X 8498, CA2Y 8542, CA2Z 8662;
#CA 2 8579A, S 3 9069, H 1 9229, S 3 9532... H 1 9546
#More Lines
[97,112, 107, 110, 108, 111, 106, 109, 104, 101, 102, 105, 99, 103, 98, 100],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
]
# ---------------------------------------------------
Nx = len(np.where(y == y[0])[0])
Ny = len(np.where(x == x[0])[0])
x_axis = x[0:Nx]
y_axis = np.unique(y)
extent = [min(x_axis),max(x_axis),min(y_axis),max(y_axis)]
# ---------------------------------------------------
z_total = [None] * (len(line)-1)
#create z array for this plot
for i in range(len(z_total)):
zi1 = [concatenated_data[:,line[i]]]
zi2 = np.reshape(zi1,(Ny,Nx,16))
z_total[i] = zi2
# ---------------------------------------------------
#plotting features (and contour levels)
#remove space between plots
#levels = arange(10**-1,10, .2) #teal levels
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-2,10**2, 1) #black levels
# ---------------------------------------------------
#loop through desired plots and desired subplots
print("Beginning plotting")
plt.clf()
for j in range (2):
for i in range(16):
add_sub_plot(i,j)
ax1 = plt.subplot(4,4,3)
add_patches(ax1)
#plt.show()
plt.savefig(("Full_lines_edit_%d.pdf")%j)
print("plot {:d} complete".format(j+1))
plt.clf()
if (time.clock() - t0) > 120:
print(time.clock() - t0)/60., "minutes process time"
else:
print(time.clock() - t0, "seconds process time")
| gpl-2.0 |
annahs/atmos_research | WHI_mass_data_to_db_run2.py | 1 | 7257 | #this script is used to add rBC mass to the database
import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
from scipy.optimize import curve_fit
from scipy import stats
from SP2_particle_record_UTC import ParticleRecord
from struct import *
import hk_new
import hk_new_no_ts_LEO
from scipy import linspace, polyval, polyfit, sqrt, stats
import math
import mysql.connector
from datetime import datetime
import calendar
#setup
data_dir = 'D:/2010/WHI_ECSP2/Binary/all data/'
start_analysis_at = datetime(2010,7,12)
end_analysis_at = datetime(2010,7,26)
SP2_number = 'ECSP2'
min_incand_BBHG = 10
max_incand_BBHG = 3600
record_size_bytes = 2458 #size of a single particle record in bytes(UBC_SP2 = 1498, EC_SP2 in 2009 and 2010 = 2458, Alert SP2 #4 and #58 = 1658)
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#**********parameters dictionary**********
parameters = {
'acq_rate': 5000000, #5000000,
}
add_data = ('''INSERT INTO whi_sp2_particle_data
(sp2b_file,
file_index,
instrument_ID,
UNIX_UTC_ts_int_start,
UNIX_UTC_ts_int_end,
BB_incand_HG,
NB_incand_HG,
rBC_mass_fg_BBHG,
rBC_mass_fg_BBHG_err,
BB_incand_pk_pos,
BB_scat_pk_pos,
BB_scat_pkht,
HK_id)
VALUES (
%(sp2b_file)s,
%(file_index)s,
%(instrument_ID)s,
%(UNIX_UTC_ts_int_start)s,
%(UNIX_UTC_ts_int_end)s,
%(BB_incand_HG)s,
%(NB_incand_HG)s,
%(rBC_mass_fg_BBHG)s,
%(rBC_mass_fg_BBHG_err)s,
%(BB_incand_pk_pos)s,
%(BB_scat_pk_pos)s,
%(BB_scat_pkht)s,
%(HK_id)s)''')
def checkHKId(particle_event_time):
event_minute = int(particle_event_time-particle_event_time%60) #hk data is in 1min intervals, so need event minute
cursor.execute(('SELECT id FROM whi_hk_data WHERE UNIX_UTC_ts = %s and id > %s'),(event_minute,1))
hk_data = cursor.fetchall()
if hk_data != []:
hk_id= hk_data[0][0]
else:
hk_id = None
return hk_id
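# Note: housekeeping rows are stored at 1-minute resolution, so the event time is
# floored to the start of its minute before the lookup; e.g. an event at 12:34:56 UTC
# is matched against the 12:34:00 housekeeping record (times invented for illustration).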
def make_plot(record):
center = record.beam_center_pos
x_vals_all = record.getAcqPoints()
y_vals_all = record.getScatteringSignal()
y_vals_split = record.getSplitDetectorSignal()
y_vals_incand = record.getWidebandIncandSignal()
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x_vals_all,y_vals_all,'o', markerfacecolor='None', label = 'scattering signal')
ax1.plot(x_vals_all, y_vals_incand, color ='red',marker = 'o', linestyle = 'None', label = 'incandescent signal')
ax1.set_xlabel('data point #')
ax1.set_ylabel('amplitude (a.u.)')
#ax1.plot(x_vals_all, y_vals_split, 'o', color ='green')
#plt.axvline(x=record.zeroCrossingPos, ymin=0, ymax=1)
#plt.axvline(x=record.beam_center_pos, ymin=0, ymax=1, color='red')
plt.legend()
plt.show()
def getParticleData(parameters_dict,instr_number,bbhg_incand_min,prev_particle_ts,bbhg_incand_max):
f = open(parameters_dict['file'], 'rb')
record_index = 0
multiple_records = []
i=1
while record_index < parameters['number_of_records']:
#read the binary for a particle
record = f.read(parameters['record_size'])
particle_record = ParticleRecord(record, parameters_dict['acq_rate'])
#run the wideband HG incandPeakInfo method to retrieve various HG BB incandescence peak attributes
particle_record.incandPeakInfo()
bbhg_incand_pk_amp = float(particle_record.incandMax)
bbhg_incand_pk_pos = float(particle_record.incandMaxPos)
#if this is an incandescent particle that we can detect with the HG channel then continue
if bbhg_incand_pk_amp >= bbhg_incand_min:
#print record_index
#make_plot(particle_record)
event_time = particle_record.timestamp #UTC
##Retrieve HK id for the event to join tables on later
housekeeping_id = checkHKId(event_time)
particle_record.narrowIncandPeakInfo()
nbhg_incand_pk_amp = float(particle_record.narrowIncandMax)
particle_record.scatteringPeakInfo()
bbhg_scat_pk_pos = float(particle_record.scatteringMaxPos)
BB_scat_pkht = float(particle_record.scatteringMax)
####calculate masses
#HG
#bbhg_mass_uncorr = 0.01244 + 0.01204*bbhg_incand_pk_amp #ECSP2 2009
#bbhg_mass_uncertainty_uncorr = 0.14983 + 2.58886E-4*bbhg_incand_pk_amp #ECSP2 2009
bbhg_mass_uncorr = -0.32619 + 0.00757*bbhg_incand_pk_amp #ECSP2 2010 - lin
bbhg_mass_uncertainty_uncorr = 0.16465 + 1.53954E-4*bbhg_incand_pk_amp #ECSP2 2010 - lin
#bbhg_mass_uncorr = 0.12998 + 0.006137*bbhg_incand_pk_amp + 6.1825e-7*bbhg_incand_pk_amp*bbhg_incand_pk_amp #ECSP2 2010 - poly
#bbhg_mass_uncertainty_uncorr = 0.05224 + 1.15458E-4*bbhg_incand_pk_amp + 4.73331E-8*bbhg_incand_pk_amp*bbhg_incand_pk_amp #ECSP2 2010 - poly
bbhg_mass_corr = bbhg_mass_uncorr/0.7 #AD correction factor is 0.7 +- 0.05
bbhg_mass_only_rel_err = bbhg_mass_uncertainty_uncorr/bbhg_mass_uncorr
bbhg_ADcorr_rel_err = (0.05/0.7)
bbhg_mass_abs_uncertainty_corr = (bbhg_ADcorr_rel_err + bbhg_mass_only_rel_err) * bbhg_mass_corr
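			# In short: the corrected mass is the calibration-curve mass divided by the
			# AD correction factor of 0.7 (+/- 0.05), and its absolute uncertainty adds
			# the relative errors of that correction and of the calibration fit:
			#   err = (0.05/0.7 + err_uncorr/m_uncorr) * m_corr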
single_record ={
'sp2b_file' : parameters['file'],
'file_index' : record_index,
'instrument_ID' :instr_number,
'UNIX_UTC_ts_int_start' :prev_particle_ts,
'UNIX_UTC_ts_int_end' :event_time,
'BB_incand_HG': bbhg_incand_pk_amp,
'NB_incand_HG' : nbhg_incand_pk_amp,
'rBC_mass_fg_BBHG': bbhg_mass_corr,
'rBC_mass_fg_BBHG_err': bbhg_mass_abs_uncertainty_corr,
'BB_incand_pk_pos': bbhg_incand_pk_pos,
'BB_scat_pk_pos': bbhg_scat_pk_pos,
'BB_scat_pkht': BB_scat_pkht,
'HK_id': housekeeping_id,
}
multiple_records.append((single_record))
#bulk insert to db table
if i%2000 == 0:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
prev_particle_ts = event_time
#increment count of detectible incandescent particles
i+= 1
record_index+=1
#bulk insert of remaining records to db
if multiple_records != []:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
#close file
f.close()
return prev_particle_ts
prev_event_ts = 0
os.chdir(data_dir)
for directory in os.listdir(data_dir):
if os.path.isdir(directory) == True and directory.startswith('20'):
parameters['folder']= directory
folder_date = datetime.strptime(directory, '%Y%m%d')
if folder_date >= start_analysis_at and folder_date < end_analysis_at:
parameters['directory']=os.path.abspath(directory)
os.chdir(parameters['directory'])
print folder_date
#start the fitting
for file in os.listdir('.'):
if file.endswith('.sp2b') and (file.endswith('gnd.sp2b')==False):
print file
parameters['file'] = file
path = parameters['directory'] + '/' + str(file)
file_bytes = os.path.getsize(path) #size of entire file in bytes
parameters['record_size'] = record_size_bytes
parameters['number_of_records']= (file_bytes/parameters['record_size'])
st = datetime.now()
prev_event_ts = getParticleData(parameters,SP2_number,min_incand_BBHG,prev_event_ts,max_incand_BBHG)
print 'e',datetime.now() - st
os.chdir(data_dir)
cnx.close()
| mit |
dilawar/moose-full | moose-examples/snippets/MULTI/minimal.py | 2 | 11414 | # minimal.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# Minimal model for loading rdesigneur: reac-diff elec signaling in neurons
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
#import proto18
EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
model = moose.Neutral( '/model' )
cellId = moose.loadModel( 'mincell.p', '/model/elec', "Neutral" )
return cellId
def loadChem( diffLength ):
chem = moose.Neutral( '/model/chem' )
neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
neuroCompt.separateSpines = 1
neuroCompt.geometryPolicy = 'cylinder'
spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
neuroCompt.name = 'dend'
spineCompt.name = 'spine'
psdCompt.name = 'psd'
def makeNeuroMeshModel():
diffLength = 20e-6 # Aim for just 3 compts.
elec = loadElec()
loadChem( diffLength )
neuroCompt = moose.element( '/model/chem/dend' )
neuroCompt.diffLength = diffLength
neuroCompt.cellPortion( elec, '/model/elec/#' )
for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if (x.diffConst > 0):
x.diffConst = 1e-11
for x in moose.wildcardFind( '/model/chem/##/Ca' ):
x.diffConst = 1e-10
# Put in dend solvers
ns = neuroCompt.numSegments
ndc = neuroCompt.numDiffCompts
print 'ns = ', ns, ', ndc = ', ndc
assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
assert( ns == 1 ) # dend, 5x (shaft+head)
assert( ndc == 1 )
nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
nmstoich.compartment = neuroCompt
nmstoich.ksolve = nmksolve
nmstoich.dsolve = nmdsolve
nmstoich.path = "/model/chem/dend/##"
print 'done setting path, numPools = ', nmdsolve.numPools
assert( nmdsolve.numPools == 1 )
assert( nmdsolve.numAllVoxels == 1 )
assert( nmstoich.numAllPools == 1 )
# oddly, numLocalFields does not work.
ca = moose.element( '/model/chem/dend/DEND/Ca' )
assert( ca.numData == ndc )
# Put in spine solvers. Note that these get info from the neuroCompt
spineCompt = moose.element( '/model/chem/spine' )
sdc = spineCompt.mesh.num
print 'sdc = ', sdc
assert( sdc == 1 )
smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
smstoich = moose.Stoich( '/model/chem/spine/stoich' )
smstoich.compartment = spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = "/model/chem/spine/##"
assert( smstoich.numAllPools == 3 )
assert( smdsolve.numPools == 3 )
assert( smdsolve.numAllVoxels == 1 )
# Put in PSD solvers. Note that these get info from the neuroCompt
psdCompt = moose.element( '/model/chem/psd' )
pdc = psdCompt.mesh.num
assert( pdc == 1 )
pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
pmstoich.compartment = psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = "/model/chem/psd/##"
assert( pmstoich.numAllPools == 3 )
assert( pmdsolve.numPools == 3 )
assert( pmdsolve.numAllVoxels == 1 )
foo = moose.element( '/model/chem/psd/Ca' )
print 'PSD: numfoo = ', foo.numData
print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
"""
CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
"""
# set up adaptors
aCa = moose.Adaptor( '/model/chem/psd/adaptCa', pdc )
adaptCa = moose.vec( '/model/chem/psd/adaptCa' )
chemCa = moose.vec( '/model/chem/psd/Ca' )
print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData
assert( len( adaptCa ) == pdc )
assert( len( chemCa ) == pdc )
path = '/model/elec/spine_head'
elecCa = moose.element( path )
moose.connect( elecCa, 'VmOut', adaptCa[0], 'input', 'Single' )
#moose.connect( adaptCa, 'outputSrc', chemCa, 'setConc', 'OneToOne' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 1e-5 # 520 to 0.0052 mM
#print adaptCa.outputOffset
#print adaptCa.scale
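# Illustrative note (not used by the model): the adaptor settings above define
# a linear mapping from the electrical Ca signal to a chemical concentration.
# The exact MOOSE Adaptor sign convention for inputOffset is assumed here; with
# inputOffset = 0 it reduces to scale * input + outputOffset, so an input of
# 520 maps to roughly 520 * 1e-5 + 0.00008 = 0.00528 mM.
def _adaptor_linear_map(value, input_offset=0.0, scale=1e-5, output_offset=0.00008):
    return (value - input_offset) * scale + output_offset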
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeElecPlots():
graphs = moose.Neutral( '/graphs' )
elec = moose.Neutral( '/graphs/elec' )
addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
addPlot( '/model/elec/spine_head', 'getVm', 'elec/spineVm' )
def makeChemPlots():
graphs = moose.Neutral( '/graphs' )
chem = moose.Neutral( '/graphs/chem' )
addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )
addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )
addPlot( '/model/chem/spine/Ca', 'getConc', 'chem/spineCa' )
addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
def testNeuroMeshMultiscale():
elecDt = 50e-6
chemDt = 1e-1
plotDt = 1e-1
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
"""
for i in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if ( i[0].diffConst > 0 ):
grandpaname = i.parent[0].parent.name + '/'
paname = i.parent[0].name + '/'
print grandpaname + paname + i[0].name, i[0].diffConst
print 'Neighbors:'
for t in moose.element( '/model/chem/spine/ksolve/junction' ).neighbors['masterJunction']:
print 'masterJunction <-', t.path
for t in moose.wildcardFind( '/model/chem/#/ksolve' ):
k = moose.element( t[0] )
print k.path + ' localVoxels=', k.numLocalVoxels, ', allVoxels= ', k.numAllVoxels
"""
'''
moose.useClock( 4, '/model/chem/dend/dsolve', 'process' )
moose.useClock( 5, '/model/chem/dend/ksolve', 'process' )
moose.useClock( 5, '/model/chem/spine/ksolve', 'process' )
moose.useClock( 5, '/model/chem/psd/ksolve', 'process' )
'''
makeChemPlots()
makeElecPlots()
moose.setClock( 0, elecDt )
moose.setClock( 1, elecDt )
moose.setClock( 2, elecDt )
moose.setClock( 4, chemDt )
moose.setClock( 5, chemDt )
moose.setClock( 6, chemDt )
moose.setClock( 7, plotDt )
moose.setClock( 8, plotDt )
moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
#moose.useClock( 6, '/model/chem/##[ISA=Adaptor]', 'process' )
moose.useClock( 7, '/graphs/chem/#', 'process' )
moose.useClock( 8, '/graphs/elec/#', 'process' )
moose.useClock( 5, '/model/chem/#/dsolve', 'process' )
moose.useClock( 6, '/model/chem/#/ksolve', 'process' )
#hsolve = moose.HSolve( '/model/elec/hsolve' )
#moose.useClock( 1, '/model/elec/hsolve', 'process' )
#hsolve.dt = elecDt
#hsolve.target = '/model/elec/compt'
#moose.reinit()
moose.element( '/model/elec/spine_head' ).inject = 1e-9
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
"""
print 'pre'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'dend'
eca = moose.vec( '/model/chem/dend/DEND/Ca' )
#for i in ( 0, 1, 2, 30, 60, 90, 120, 144 ):
for i in range( 13 ):
print i, eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'PSD'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'spine'
eca = moose.vec( '/model/chem/spine/SPINE/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
"""
moose.start( 0.5 )
plt.ion()
fig = plt.figure( figsize=(8,8) )
chem = fig.add_subplot( 211 )
chem.set_ylim( 0, 0.004 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, len(x.vector), 1 )
line1, = chem.plot( pos, x.vector, label=x.name )
plt.legend()
elec = fig.add_subplot( 212 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, len(x.vector), 1 )
line1, = elec.plot( pos, x.vector, label=x.name )
plt.legend()
fig.canvas.draw()
raw_input()
'''
for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
t = numpy.arange( 0, x.vector.size, 1 )
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
'''
print 'All done'
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
| gpl-2.0 |
YinongLong/scikit-learn | examples/linear_model/plot_ols.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
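# Illustrative cross-check (not part of the original example): the same fit can
# be recovered directly from the least-squares normal equations, since linear
# regression minimizes the residual sum of squares described in the docstring.
X_design = np.hstack([np.ones((diabetes_X_train.shape[0], 1)), diabetes_X_train])
beta = np.linalg.lstsq(X_design, diabetes_y_train)[0]
print('Normal-equation fit (intercept, slope): %.2f, %.2f' % (beta[0], beta[1]))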
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jereze/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
jstoxrocky/statsmodels | tools/examples_rst.py | 30 | 5894 | #! /usr/bin/env python
import os
import sys
import re
import subprocess
import pickle
from StringIO import StringIO
# 3rd party
from matplotlib import pyplot as plt
# Ours
import hash_funcs
#----------------------------------------------------
# Globals
#----------------------------------------------------
# these files do not get made into .rst files because of
# some problems, they may need a simple cleaning up
exclude_list = ['run_all.py',
# these need to be cleaned up
'example_ols_tftest.py',
'example_glsar.py',
'example_ols_table.py',
#not finished yet
'example_arima.py',
'try_wls.py']
file_path = os.path.dirname(__file__)
docs_rst_dir = os.path.realpath(os.path.join(file_path,
'../docs/source/examples/generated/'))
example_dir = os.path.realpath(os.path.join(file_path,
'../examples/'))
def check_script(filename):
"""
Run all the files in filelist from run_all. Add any with problems
to exclude_list and return it.
"""
file_to_run = "python -c\"import warnings; "
file_to_run += "warnings.simplefilter('ignore'); "
file_to_run += "from matplotlib import use; use('Agg'); "
file_to_run += "execfile(r'%s')\"" % os.path.join(example_dir, filename)
proc = subprocess.Popen(file_to_run, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
#NOTE: use communicate to wait for process termination
stdout, stderr = proc.communicate()
result = proc.returncode
if result != 0: # raised an error
msg = "Not generating reST from %s. An error occurred.\n" % filename
msg += stderr
print msg
return False
return True
def parse_docstring(block):
"""
Strips the docstring from a string representation of the file.
Returns the docstring and block without it
"""
ds = "\"{3}|'{3}"
try:
start = re.search(ds, block).end()
end = re.search(ds, block[start:]).start()
except: #TODO: make more informative
raise IOError("File %s does not have a docstring?")
docstring = block[start:start+end]
block = block[start+end+3:]
return docstring.strip(), block
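# Illustrative check of parse_docstring on a tiny source string (assumed layout:
# a module docstring followed by code). This is a documentation sketch only and
# is not used by the tool itself.
_example_source = '"""One line summary."""\nx = 1\n'
_example_doc, _example_rest = parse_docstring(_example_source)
assert _example_doc == 'One line summary.'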
def parse_file(block):
"""
Block is a raw string file.
"""
docstring, block = parse_docstring(block)
# just get the first line from the docstring
docstring = docstring.split('\n')[0] or docstring.split('\n')[1]
outfile = [docstring,'='*len(docstring),'']
block = block.split('\n')
# iterate through the rest of block, anything in comments is stripped of #
# anything else is fair game to go in an ipython directive
code_snippet = False
for line in block:
#if not len(line):
# continue
# preserve blank lines
if line.startswith('#') and not (line.startswith('#%') or
line.startswith('#@')):
# on some ReST text
if code_snippet: # were on a code snippet
outfile.append('')
code_snippet = False
line = line.strip()
# try to remove lines like # hello -> #hello
line = re.sub("(?<=#) (?!\s)", "", line)
# make sure commented out things have a space
line = re.sub("#\.\.(?!\s)", "#.. ", line)
line = re.sub("^#+", "", line) # strip multiple hashes
outfile.append(line)
else:
if not code_snippet: # new code block
outfile.append('\n.. ipython:: python\n')
code_snippet = True
# handle decorators and magic functions
if line.startswith('#%') or line.startswith('#@'):
line = line[1:]
outfile.append(' '+line.strip('\n'))
return '\n'.join(outfile)
def write_file(outfile, rst_file_pth):
"""
Write outfile to rst_file_pth
"""
print "Writing ", os.path.basename(rst_file_pth)
write_file = open(rst_file_pth, 'w')
write_file.writelines(outfile)
write_file.close()
def restify(example_file, filehash, fname):
"""
Takes a whole file ie., the result of file.read(), its md5 hash, and
the filename
Parse the file
Write the new .rst
Update the hash_dict
"""
write_filename = os.path.join(docs_rst_dir, fname[:-2] + 'rst')
try:
rst_file = parse_file(example_file)
except IOError as err:
raise IOError(err.message % fname)
write_file(rst_file, write_filename)
if filehash is not None:
hash_funcs.update_hash_dict(filehash, fname)
if __name__ == "__main__":
sys.path.insert(0, example_dir)
from run_all import filelist
sys.path.remove(example_dir)
if not os.path.exists(docs_rst_dir):
os.makedirs(docs_rst_dir)
if len(sys.argv) > 1: # given a file,files to process, no help flag yet
for example_file in sys.argv[1:]:
whole_file = open(example_file, 'r').read()
restify(whole_file, None, example_file)
else: # process the whole directory
for root, dirnames, filenames in os.walk(example_dir):
if 'notebooks' in root:
continue
for example in filenames:
example_file = os.path.join(root, example)
whole_file = open(example_file, 'r').read()
to_write, filehash = hash_funcs.check_hash(whole_file,
example)
if not to_write:
print "Hash has not changed for file %s" % example
continue
elif (not example.endswith('.py') or example in exclude_list or
not check_script(example_file)):
continue
restify(whole_file, filehash, example)
| bsd-3-clause |
schets/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
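# Illustrative helper (not part of the original test suite): the cosine
# distances asserted in the boundary-handling test above are simply
# 1 - cos(theta) for angles of ~0, 45, 90 and ~180 degrees to the query.
def _expected_boundary_cosine_distances():
    angles = np.array([0.0, np.pi / 4, np.pi / 2, np.pi])
    return 1.0 - np.cos(angles)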
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
JasonKessler/scattertext | demo_pca_documents.py | 1 | 1737 | import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
import umap
import scattertext as st
from scipy.sparse.linalg import svds
convention_df = st.SampleCorpora.ConventionData2012.get_data()
convention_df['parse'] = convention_df['text'].apply(st.whitespace_nlp_with_sentences)
corpus = (st.CorpusFromParsedDocuments(convention_df,
category_col='party',
parsed_col='parse')
.build()
.get_stoplisted_unigram_corpus())
corpus = corpus.add_doc_names_as_metadata(corpus.get_df()['speaker'])
embeddings = TfidfTransformer().fit_transform(corpus.get_term_doc_mat())
u, s, vt = svds(embeddings, k=3, maxiter=20000, which='LM')
projection = pd.DataFrame({'term': corpus.get_metadata(), 'x': u.T[0], 'y': u.T[1]}).set_index('term')
category = 'democrat'
scores = (corpus.get_category_ids() == corpus.get_categories().index(category)).astype(int)
html = st.produce_pca_explorer(corpus,
category=category,
category_name='Democratic',
not_category_name='Republican',
metadata=convention_df['speaker'],
width_in_pixels=1000,
show_axes=False,
use_non_text_features=True,
use_full_doc=True,
projection=projection,
scores=scores,
show_top_terms=False)
file_name = 'demo_pca_documents.html'
open(file_name, 'wb').write(html.encode('utf-8'))
print('Open ./%s in Chrome.' % (file_name))
| apache-2.0 |
IndraVikas/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
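# Illustrative addition (not in the original example): compare the recovered
# off-diagonal support of the GraphLasso precision with the ground truth, which
# makes the "too many non-zero coefficients" remark in the docstring concrete.
off_diag = ~np.eye(n_features, dtype=bool)
n_true_nonzero = np.sum((np.abs(prec) > 1e-10) & off_diag)
n_est_nonzero = np.sum((np.abs(prec_) > 1e-10) & off_diag)
print("off-diagonal non-zeros: true=%d, GraphLasso estimate=%d"
      % (n_true_nonzero, n_est_nonzero))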
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |